From 5b145459895bf1dfa11f12ba25200b461d0f8071 Mon Sep 17 00:00:00 2001
From: Friedrich Gonzalez
Date: Wed, 8 May 2024 21:31:15 +0200
Subject: [PATCH] mod vendor

Signed-off-by: Friedrich Gonzalez
---
 vendor/cloud.google.com/go/.gitignore | 12 -
 .../.release-please-manifest-submodules.json | 111 -
 .../go/.release-please-manifest.json | 3 -
 vendor/cloud.google.com/go/CHANGES.md | 2424 --
 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md | 44 -
 vendor/cloud.google.com/go/CONTRIBUTING.md | 327 -
 vendor/cloud.google.com/go/README.md | 139 -
 vendor/cloud.google.com/go/RELEASING.md | 141 -
 vendor/cloud.google.com/go/SECURITY.md | 7 -
 .../go/compute/internal/version.go} | 13 +-
 .../go/compute/metadata/CHANGES.md | 19 +
 .../go/compute/metadata/LICENSE | 202 +
 .../go/compute/metadata/README.md | 27 +
 .../go/compute/metadata/metadata.go | 4 +-
 .../metadata/tidyfix.go} | 6 +-
 vendor/cloud.google.com/go/doc.go | 228 -
 vendor/cloud.google.com/go/iam/CHANGES.md | 63 +
 .../go/iam/apiv1/iampb}/iam_policy.pb.go | 4 +-
 .../go/iam/apiv1/iampb}/options.pb.go | 4 +-
 .../go/iam/apiv1/iampb}/policy.pb.go | 76 +-
 vendor/cloud.google.com/go/iam/iam.go | 2 +-
 .../go/internal/.repo-metadata-full.json | 202 +-
 vendor/cloud.google.com/go/internal/README.md | 27 +-
 .../go/internal/cloudbuild.yaml | 25 +
 ...elease-please-config-yoshi-submodules.json | 334 -
 .../go/release-please-config.json | 10 -
 .../go/storage/.release-please-manifest.json | 3 -
 vendor/cloud.google.com/go/storage/CHANGES.md | 26 +
 vendor/cloud.google.com/go/storage/README.md | 2 +-
 vendor/cloud.google.com/go/storage/bucket.go | 136 +-
 vendor/cloud.google.com/go/storage/client.go | 9 +-
 vendor/cloud.google.com/go/storage/copy.go | 15 +-
 .../go/storage/grpc_client.go | 132 +-
 vendor/cloud.google.com/go/storage/hmac.go | 4 +-
 .../go/storage/http_client.go | 20 +-
 .../storage/internal/apiv2/storage_client.go | 104 +-
 .../internal/apiv2/stubs/storage.pb.go | 3094 +-
 .../go/storage/internal/version.go | 2 +-
 .../go/storage/post_policy_v4.go | 2 +-
 .../go/storage/release-please-config.json | 12 -
 vendor/cloud.google.com/go/storage/storage.go | 145 +-
 vendor/cloud.google.com/go/storage/writer.go | 5 +-
 vendor/cloud.google.com/go/testing.md | 236 -
 .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 42 +
 .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 178 +-
 .../sdk/azcore/internal/exported/exported.go | 3 +-
 .../azcore/internal/pollers/async/async.go | 13 +-
 .../sdk/azcore/internal/pollers/body/body.go | 4 +
 .../sdk/azcore/internal/pollers/loc/loc.go | 9 +-
 .../sdk/azcore/internal/pollers/op/op.go | 4 +
 .../sdk/azcore/internal/shared/constants.go | 2 +-
 .../sdk/azcore/policy/policy.go | 23 +-
 .../sdk/azcore/runtime/pipeline.go | 8 +-
 .../sdk/azcore/runtime/policy_api_version.go | 75 +
 .../sdk/azcore/runtime/policy_logging.go | 4 +-
 .../sdk/azcore/runtime/policy_retry.go | 21 +-
 .../sdk/azcore/runtime/request.go | 55 +-
 .../sdk/azcore/runtime/response.go | 3 +-
 .../sdk/azcore/tracing/constants.go | 41 +
 .../sdk/azcore/tracing/tracing.go | 168 +
 .../sdk/azidentity/CHANGELOG.md | 30 +
 .../azure-sdk-for-go/sdk/azidentity/README.md | 6 +-
 .../sdk/azidentity/TROUBLESHOOTING.md | 5 +
 .../sdk/azidentity/azidentity.go | 46 +-
 .../azidentity/client_assertion_credential.go | 74 +
 .../client_certificate_credential.go | 60 +-
 .../azidentity/client_secret_credential.go | 15 +-
 .../azidentity/default_azure_credential.go | 10 +-
 .../sdk/azidentity/device_code_credential.go | 13 +-
 .../sdk/azidentity/environment_credential.go | 26 +-
 .../azure-sdk-for-go/sdk/azidentity/errors.go | 16 +
 .../interactive_browser_credential.go | 13 +-
 .../sdk/azidentity/managed_identity_client.go | 20 +-
 .../azidentity/managed_identity_credential.go | 32 +-
 .../username_password_credential.go | 13 +-
 .../sdk/azidentity/version.go | 2 +-
 .../sdk/internal/diag/diag.go | 2 +-
 .../sdk/internal/temporal/resource.go | 5 +-
 .../sdk/storage/azblob/CHANGELOG.md | 39 +-
 .../sdk/storage/azblob/README.md | 531 +-
 .../sdk/storage/azblob/appendblob/client.go | 276 +
 .../sdk/storage/azblob/appendblob/models.go | 166 +
 .../storage/azblob/appendblob/responses.go | 23 +
 .../sdk/storage/azblob/autorest.md | 171 -
 .../sdk/storage/azblob/blob/client.go | 423 +
 .../sdk/storage/azblob/blob/constants.go | 241 +
 .../sdk/storage/azblob/blob/models.go | 492 +
 .../sdk/storage/azblob/blob/responses.go | 104 +
 .../retry_reader.go} | 140 +-
 .../sdk/storage/azblob/blob/utils.go | 79 +
 .../storage/azblob/bloberror/error_codes.go | 150 +
 .../azblob/{ => blockblob}/chunkwriting.go | 47 +-
 .../sdk/storage/azblob/blockblob/client.go | 481 +
 .../sdk/storage/azblob/blockblob/constants.go | 40 +
 .../sdk/storage/azblob/blockblob/models.go | 311 +
 .../sdk/storage/azblob/blockblob/responses.go | 111 +
 .../sdk/storage/azblob/client.go | 171 +
 .../sdk/storage/azblob/common.go | 36 +
 .../sdk/storage/azblob/connection.go | 39 -
 .../sdk/storage/azblob/constants.go | 53 +-
 .../sdk/storage/azblob/container/client.go | 334 +
 .../sdk/storage/azblob/container/constants.go | 166 +
 .../sdk/storage/azblob/container/models.go | 263 +
 .../sdk/storage/azblob/container/responses.go | 38 +
 .../sdk/storage/azblob/doc.go | 9 +-
 .../sdk/storage/azblob/highlevel.go | 316 -
 .../storage/azblob/internal/base/clients.go | 89 +
 .../internal/exported/access_conditions.go | 43 +
 .../exported/access_policy.go} | 6 +-
 .../azblob/internal/exported/exported.go | 33 +
 .../exported/shared_key_credential.go} | 67 +-
 .../exported/user_delegation_credential.go | 64 +
 .../azblob/internal/exported/version.go | 12 +
 .../internal/generated/appendblob_client.go | 19 +
 .../azblob/internal/generated/autorest.md | 304 +
 .../azblob/internal/generated/blob_client.go | 17 +
 .../internal/generated/block_blob_client.go | 19 +
 .../internal/generated/container_client.go | 17 +
 .../internal/generated/pageblob_client.go | 17 +
 .../internal/generated/service_client.go | 17 +
 .../generated/zz_appendblob_client.go | 653 +
 .../generated/zz_blob_client.go} | 1478 +-
 .../internal/generated/zz_blockblob_client.go | 960 +
 .../generated/zz_constants.go} | 263 +-
 .../generated/zz_container_client.go} | 770 +-
 .../generated/zz_models.go} | 2696 +-
 .../internal/generated/zz_models_serde.go | 481 +
 .../internal/generated/zz_pageblob_client.go | 1287 +
 .../generated/zz_response_types.go} | 850 +-
 .../generated/zz_service_client.go} | 228 +-
 .../generated/zz_time_rfc1123.go} | 3 +-
 .../generated/zz_time_rfc3339.go} | 3 +-
 .../generated/zz_xml_helper.go} | 3 +-
 .../azblob/internal/shared/batch_transfer.go | 78 +
 .../{ => internal/shared}/bytes_writer.go | 6 +-
 .../azblob/internal/shared/section_writer.go | 53 +
 .../storage/azblob/internal/shared/shared.go | 238 +
 .../{ => internal/shared}/transfer_manager.go | 6 +-
 .../sdk/storage/azblob/internal/zc_shared.go | 150 -
 .../sdk/storage/azblob/migrationguide.md | 76 +
 .../sdk/storage/azblob/models.go | 70 +
 .../sdk/storage/azblob/pageblob/client.go | 403 +
 .../sdk/storage/azblob/pageblob/constants.go | 65 +
 .../models.go} | 282 +-
 .../sdk/storage/azblob/pageblob/responses.go | 38 +
 .../sdk/storage/azblob/responses.go | 51 +
 .../sdk/storage/azblob/sas/account.go | 316 +
 .../sdk/storage/azblob/sas/query_params.go | 440 +
 .../{zc_sas_service.go => sas/service.go} | 252 +-
 .../{zc_parsing_urls.go => sas/url_parts.go} | 98 +-
 .../sdk/storage/azblob/section_writer.go | 53 -
 .../sdk/storage/azblob/service/client.go | 287 +
 .../sdk/storage/azblob/service/constants.go | 92 +
 .../sdk/storage/azblob/service/models.go | 220 +
 .../sdk/storage/azblob/service/responses.go | 41 +
 .../sdk/storage/azblob/test-resources.json | 513 +
 .../storage/azblob/zc_append_blob_client.go | 154 -
 .../sdk/storage/azblob/zc_blob_client.go | 278 -
 .../storage/azblob/zc_blob_lease_client.go | 98 -
 .../storage/azblob/zc_block_blob_client.go | 201 -
 .../storage/azblob/zc_connection_string.go | 88 -
 .../sdk/storage/azblob/zc_container_client.go | 253 -
 .../azblob/zc_container_lease_client.go | 102 -
 .../sdk/storage/azblob/zc_page_blob_client.go | 261 -
 .../sdk/storage/azblob/zc_response_error.go | 17 -
 .../sdk/storage/azblob/zc_response_helpers.go | 35 -
 .../sdk/storage/azblob/zc_sas_account.go | 243 -
 .../sdk/storage/azblob/zc_sas_query_params.go | 427 -
 .../sdk/storage/azblob/zc_service_client.go | 266 -
 .../sdk/storage/azblob/zc_storage_error.go | 236 -
 .../sdk/storage/azblob/zc_validators.go | 107 -
 .../storage/azblob/zm_access_conditions.go | 43 -
 .../azblob/zm_append_blob_client_util.go | 184 -
 .../sdk/storage/azblob/zm_blob_client_util.go | 478 -
 .../azblob/zm_blob_lease_client_util.go | 160 -
 .../azblob/zm_block_blob_client_util.go | 272 -
 .../sdk/storage/azblob/zm_client_util.go | 55 -
 .../azblob/zm_container_client_util.go | 271 -
 .../azblob/zm_container_lease_client_util.go | 166 -
 .../sdk/storage/azblob/zm_highlevel_util.go | 201 -
 .../zm_serialize_and_desearilize_util.go | 68 -
 .../storage/azblob/zm_service_client_util.go | 226 -
 .../azblob/zz_generated_appendblob_client.go | 648 -
 .../azblob/zz_generated_blockblob_client.go | 953 -
 .../azblob/zz_generated_pageblob_client.go | 1247 -
 .../sdk/storage/azblob/zz_generated_pagers.go | 287 -
 .../apps/confidential/confidential.go | 157 +-
 .../apps/errors/errors.go | 3 +-
 .../apps/internal/base/base.go | 23 +-
 .../internal/base/internal/storage/items.go | 6 +
 .../internal/storage/partitioned_storage.go | 8 +-
 .../internal/base/internal/storage/storage.go | 36 +-
 .../apps/internal/exported/exported.go | 34 +
 .../apps/internal/json/json.go | 2 +-
 .../apps/internal/local/server.go | 3 +-
 .../apps/internal/oauth/oauth.go | 32 +-
 .../oauth/ops/accesstokens/accesstokens.go | 71 +-
 .../internal/oauth/ops/authority/authority.go | 22 +-
 .../internal/oauth/ops/internal/comm/comm.go | 13 +-
 .../apps/internal/oauth/ops/ops.go | 1 +
 .../apps/internal/version/version.go | 2 +-
 .../armon/go-metrics/prometheus/prometheus.go | 7 +-
 .../aws/credentials/processcreds/provider.go | 24 +-
 .../aws/aws-sdk-go/aws/crr/cache.go | 122 +
 .../aws/aws-sdk-go/aws/crr/endpoint.go | 132 +
 .../aws/aws-sdk-go/aws/crr/sync_map.go | 30 +
 .../aws/aws-sdk-go/aws/crr/sync_map_1_8.go | 49 +
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 4178 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../internal/shareddefaults/shared_config.go | 18 +-
 .../shared_config_resolve_home.go | 18 +
 .../shared_config_resolve_home_go1.12.go | 13 +
 .../private/protocol/json/jsonutil/build.go | 19 +-
 .../protocol/json/jsonutil/unmarshal.go | 13 +
 .../protocol/jsonrpc/unmarshal_error.go | 63 +-
 .../protocol/query/queryutil/queryutil.go | 34 +-
 .../aws-sdk-go/private/protocol/rest/build.go | 18 +-
 .../private/protocol/rest/unmarshal.go | 18 +-
 .../private/protocol/xml/xmlutil/build.go | 32 +-
 .../private/protocol/xml/xmlutil/unmarshal.go | 18 +-
 .../aws/aws-sdk-go/service/dynamodb/api.go | 27190 ++++++++++++
 .../service/dynamodb/customizations.go | 98 +
 .../aws/aws-sdk-go/service/dynamodb/doc.go | 45 +
 .../aws-sdk-go/service/dynamodb/doc_custom.go | 27 +
 .../dynamodb/dynamodbiface/interface.go | 303 +
 .../aws/aws-sdk-go/service/dynamodb/errors.go | 388 +
 .../aws-sdk-go/service/dynamodb/service.go | 112 +
 .../aws-sdk-go/service/dynamodb/waiters.go | 107 +
 .../aws/aws-sdk-go/service/sts/api.go | 230 +-
 vendor/github.com/cespare/xxhash/v2/README.md | 31 +-
 .../github.com/cespare/xxhash/v2/testall.sh | 10 +
 vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +-
 .../cespare/xxhash/v2/xxhash_amd64.s | 336 +-
 .../cespare/xxhash/v2/xxhash_arm64.s | 183 +
 .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
 .../cespare/xxhash/v2/xxhash_other.go | 22 +-
 .../cespare/xxhash/v2/xxhash_safe.go | 1 +
 .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +-
 .../go-systemd/v22/activation/files_unix.go | 70 +
 .../v22/activation/files_windows.go} | 15 +-
 .../go-systemd/v22/activation/listeners.go | 103 +
 .../go-systemd/v22/activation/packetconns.go | 38 +
 .../go-systemd/v22/journal/journal_unix.go | 75 +-
 .../go-systemd/v22/journal/journal_windows.go | 8 +
 .../cortex/pkg/chunk/encoding/chunk.go | 109 +
 .../cortex/pkg/chunk/encoding/factory.go | 51 +
 .../pkg/chunk/encoding/instrumentation.go | 91 +
 .../pkg/chunk/encoding/prometheus_chunk.go | 140 +
 .../cortex/pkg/cortexpb/cortex.pb.go | 89 +-
 .../cortex/pkg/cortexpb/cortex.proto | 2 +-
 .../cortex/pkg/cortexpb/slicesPool.go | 61 +
 .../cortex/pkg/cortexpb/timeseries.go | 36 +
 .../cortex/pkg/ingester/client/client.go | 16 +-
 .../cortex/pkg/ingester/client/custom.go | 20 +
 .../cortex/pkg/ingester/client/ingester.pb.go | 207 +-
 .../cortex/pkg/ingester/client/ingester.proto | 3 +-
 .../cortexproject/cortex/pkg/ring/http.go | 19 +-
 .../cortex/pkg/ring/kv/client.go | 16 +-
 .../cortex/pkg/ring/kv/codec/clonable.go | 6 +
 .../cortex/pkg/ring/kv/codec/codec.go | 63 +-
 .../cortex/pkg/ring/kv/codec/multikey.go | 25 +
 .../cortex/pkg/ring/kv/consul/client.go | 4 +
 .../cortex/pkg/ring/kv/consul/mock.go | 4 +
 .../cortex/pkg/ring/kv/dynamodb/client.go | 325 +
 .../cortex/pkg/ring/kv/dynamodb/dynamodb.go | 268 +
 .../cortex/pkg/ring/kv/dynamodb/metrics.go | 99 +
 .../cortex/pkg/ring/kv/etcd/etcd.go | 4 +
 .../cortex/pkg/ring/kv/memberlist/kv.pb.go | 27 +-
 .../cortex/pkg/ring/kv/memberlist/kv.proto | 2 +-
 .../ring/kv/memberlist/memberlist_client.go | 8 +-
 .../pkg/ring/kv/memberlist/mergeable.go | 15 +-
 .../cortex/pkg/ring/kv/metrics.go | 5 +
 .../cortexproject/cortex/pkg/ring/kv/mock.go | 5 +
 .../cortexproject/cortex/pkg/ring/kv/multi.go | 5 +
 .../cortex/pkg/ring/kv/prefix.go | 5 +
 .../cortex/pkg/ring/lifecycler.go | 10 +-
 .../cortexproject/cortex/pkg/ring/model.go | 106 +-
 .../cortex/pkg/ring/replication_strategy.go | 17 +-
 .../cortexproject/cortex/pkg/ring/ring.go | 15 +-
 .../cortexproject/cortex/pkg/ring/ring.pb.go | 55 +-
 .../cortexproject/cortex/pkg/ring/ring.proto | 2 +-
 .../cortex/pkg/ring/util/string_utils.go | 12 -
 .../pkg/storage/bucket/s3/bucket_client.go | 139 +-
 .../cortex/pkg/storage/tsdb/caching_bucket.go | 38 +-
 .../cortex/pkg/storage/tsdb/config.go | 31 +-
 .../cortex/pkg/storage/tsdb/index_cache.go | 22 +-
 .../pkg/storage/tsdb/redis_client_config.go | 107 +
 .../cortex/pkg/util/grpcclient/grpcclient.go | 6 +-
 .../grpcencoding/snappyblock/snappyblock.go | 143 +
 .../cortex/pkg/util/grpcencoding/zstd/zstd.go | 86 +
 .../cortex/pkg/util/validation/limits.go | 29 +-
 .../github.com/dgryski/go-rendezvous/LICENSE | 21 -
 .../github.com/dgryski/go-rendezvous/rdv.go | 79 -
 .../github.com/dustin/go-humanize/.travis.yml | 16 +-
 .../dustin/go-humanize/README.markdown | 2 +-
 .../github.com/dustin/go-humanize/bigbytes.go | 20 +-
 .../github.com/dustin/go-humanize/commaf.go | 1 +
 vendor/github.com/dustin/go-humanize/ftoa.go | 3 +
 .../github.com/dustin/go-humanize/number.go | 2 +-
 vendor/github.com/dustin/go-humanize/si.go | 4 +
 vendor/github.com/efficientgo/core/COPYRIGHT | 2 +
 .../efficientgo/{tools => }/core/LICENSE | 0
 .../{tools/core/pkg => core}/errcapture/do.go | 7 +-
 .../core/pkg => core}/errcapture/doc.go | 30 +-
 .../github.com/efficientgo/core/errors/doc.go | 13 +
 .../efficientgo/core/errors/errors.go | 168 +
 .../efficientgo/core/errors/stacktrace.go | 58 +
 .../core/pkg => core}/logerrcapture/do.go | 5 +-
 .../core/pkg => core}/logerrcapture/doc.go | 32 +-
 .../efficientgo/core/merrors/doc.go | 29 +
 .../errors.go => core/merrors/merrors.go} | 0
 .../efficientgo/core/testutil/doc.go | 6 +
 .../core/testutil/internal/difflib.go | 645 +
 .../core/pkg => core}/testutil/testorbench.go | 0
 .../efficientgo/core/testutil/testutil.go | 233 +
 .../efficientgo/tools/core/pkg/merrors/doc.go | 20 -
 .../tools/core/pkg/testutil/doc.go | 6 -
 .../tools/core/pkg/testutil/testutil.go | 181 -
 .../efficientgo/tools/extkingpin/LICENSE | 201 +
 .../efficientgo/tools/extkingpin/doc.go | 20 +
 .../tools/extkingpin/pathorcontent.go | 143 +
 vendor/github.com/fatih/color/README.md | 14 +-
 vendor/github.com/fatih/color/color.go | 46 +-
 vendor/github.com/fatih/color/doc.go | 137 +-
 .../github.com/fsnotify/fsnotify/.gitignore | 10 +-
 vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 -
 .../github.com/fsnotify/fsnotify/CHANGELOG.md | 113 +
 .../fsnotify/fsnotify/CONTRIBUTING.md | 72 +-
 vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +-
 vendor/github.com/fsnotify/fsnotify/README.md | 205 +-
 .../fsnotify/fsnotify/backend_fen.go | 162 +
 .../fsnotify/fsnotify/backend_inotify.go | 459 +
 .../fsnotify/fsnotify/backend_kqueue.go | 707 +
 .../fsnotify/fsnotify/backend_other.go | 66 +
 .../fsnotify/fsnotify/backend_windows.go | 746 +
 vendor/github.com/fsnotify/fsnotify/fen.go | 38 -
 .../github.com/fsnotify/fsnotify/fsnotify.go | 80 +-
 .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 -
 .../github.com/fsnotify/fsnotify/inotify.go | 351 -
 .../fsnotify/fsnotify/inotify_poller.go | 187 -
 vendor/github.com/fsnotify/fsnotify/kqueue.go | 535 -
 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 +
 .../{open_mode_bsd.go => system_bsd.go} | 4 -
 .../{open_mode_darwin.go => system_darwin.go} | 4 -
 .../github.com/fsnotify/fsnotify/windows.go | 586 -
 .../github.com/go-logfmt/logfmt/CHANGELOG.md | 46 +-
 vendor/github.com/go-logfmt/logfmt/README.md | 30 +-
 vendor/github.com/go-logfmt/logfmt/decode.go | 17 +
 .../github.com/go-redis/redis/v8/.gitignore | 3 -
 .../go-redis/redis/v8/.golangci.yml | 4 -
 .../go-redis/redis/v8/.prettierrc.yml | 4 -
 .../github.com/go-redis/redis/v8/CHANGELOG.md | 177 -
 vendor/github.com/go-redis/redis/v8/Makefile | 35 -
 vendor/github.com/go-redis/redis/v8/README.md | 175 -
 .../github.com/go-redis/redis/v8/RELEASING.md | 15 -
 .../github.com/go-redis/redis/v8/cluster.go | 1750 -
 .../go-redis/redis/v8/cluster_commands.go | 109 -
 .../github.com/go-redis/redis/v8/command.go | 3478 --
 .../github.com/go-redis/redis/v8/commands.go | 3475 --
 vendor/github.com/go-redis/redis/v8/doc.go | 4 -
 vendor/github.com/go-redis/redis/v8/error.go | 144 -
 .../go-redis/redis/v8/internal/arg.go | 56 -
 .../redis/v8/internal/hashtag/hashtag.go | 78 -
 .../go-redis/redis/v8/internal/hscan/hscan.go | 201 -
 .../redis/v8/internal/hscan/structmap.go | 93 -
 .../go-redis/redis/v8/internal/internal.go | 29 -
 .../go-redis/redis/v8/internal/log.go | 26 -
 .../go-redis/redis/v8/internal/once.go | 60 -
 .../go-redis/redis/v8/internal/pool/conn.go | 121 -
 .../go-redis/redis/v8/internal/pool/pool.go | 557 -
 .../redis/v8/internal/pool/pool_single.go | 58 -
 .../redis/v8/internal/pool/pool_sticky.go | 201 -
 .../redis/v8/internal/proto/reader.go | 332 -
 .../go-redis/redis/v8/internal/proto/scan.go | 180 -
 .../redis/v8/internal/proto/writer.go | 155 -
 .../go-redis/redis/v8/internal/rand/rand.go | 50 -
 .../go-redis/redis/v8/internal/safe.go | 12 -
 .../go-redis/redis/v8/internal/unsafe.go | 21 -
 .../go-redis/redis/v8/internal/util.go | 46 -
 .../go-redis/redis/v8/internal/util/safe.go | 12 -
 .../redis/v8/internal/util/strconv.go | 19 -
 .../go-redis/redis/v8/internal/util/unsafe.go | 23 -
 .../github.com/go-redis/redis/v8/iterator.go | 77 -
 .../github.com/go-redis/redis/v8/options.go | 429 -
 .../github.com/go-redis/redis/v8/package.json | 8 -
 .../github.com/go-redis/redis/v8/pipeline.go | 147 -
 vendor/github.com/go-redis/redis/v8/pubsub.go | 668 -
 vendor/github.com/go-redis/redis/v8/redis.go | 773 -
 vendor/github.com/go-redis/redis/v8/result.go | 180 -
 vendor/github.com/go-redis/redis/v8/ring.go | 736 -
 vendor/github.com/go-redis/redis/v8/script.go | 65 -
 .../github.com/go-redis/redis/v8/sentinel.go | 796 -
 vendor/github.com/go-redis/redis/v8/tx.go | 149 -
 .../github.com/go-redis/redis/v8/universal.go | 215 -
 .../github.com/go-redis/redis/v8/version.go | 6 -
 vendor/github.com/golang-jwt/jwt/.travis.yml | 11 -
 .../golang-jwt/jwt/MIGRATION_GUIDE.md | 22 -
 vendor/github.com/golang-jwt/jwt/claims.go | 146 -
 vendor/github.com/golang-jwt/jwt/errors.go | 59 -
 vendor/github.com/golang-jwt/jwt/token.go | 108 -
 .../golang-jwt/jwt/{ => v4}/.gitignore | 0
 .../golang-jwt/jwt/{ => v4}/LICENSE | 0
 .../golang-jwt/jwt/v4/MIGRATION_GUIDE.md | 22 +
 .../golang-jwt/jwt/{ => v4}/README.md | 70 +-
 .../github.com/golang-jwt/jwt/v4/SECURITY.md | 19 +
 .../jwt/{ => v4}/VERSION_HISTORY.md | 11 +
 vendor/github.com/golang-jwt/jwt/v4/claims.go | 269 +
 .../github.com/golang-jwt/jwt/{ => v4}/doc.go | 0
 .../golang-jwt/jwt/{ => v4}/ecdsa.go | 24 +-
 .../golang-jwt/jwt/{ => v4}/ecdsa_utils.go | 8 +-
 .../github.com/golang-jwt/jwt/v4/ed25519.go | 85 +
 .../golang-jwt/jwt/v4/ed25519_utils.go | 64 +
 vendor/github.com/golang-jwt/jwt/v4/errors.go | 112 +
 .../golang-jwt/jwt/{ => v4}/hmac.go | 6 +-
 .../golang-jwt/jwt/{ => v4}/map_claims.go | 101 +-
 .../golang-jwt/jwt/{ => v4}/none.go | 2 +-
 .../golang-jwt/jwt/{ => v4}/parser.go | 49 +-
 .../golang-jwt/jwt/v4/parser_option.go | 29 +
 .../github.com/golang-jwt/jwt/{ => v4}/rsa.go | 6 +-
 .../golang-jwt/jwt/{ => v4}/rsa_pss.go | 7 +-
 .../golang-jwt/jwt/{ => v4}/rsa_utils.go | 16 +-
 .../golang-jwt/jwt/{ => v4}/signing_method.go | 17 +-
 .../golang-jwt/jwt/v4/staticcheck.conf | 1 +
 vendor/github.com/golang-jwt/jwt/v4/token.go | 143 +
 vendor/github.com/golang-jwt/jwt/v4/types.go | 145 +
 .../client/client.go | 52 +-
 .../client/util/util.go | 35 +-
 .../gax-go/v2/.release-please-manifest.json | 2 +-
 .../googleapis/gax-go/v2/CHANGES.md | 15 +
 .../googleapis/gax-go/v2/apierror/apierror.go | 45 +-
 .../googleapis/gax-go/v2/content_type.go | 112 +
 .../googleapis/gax-go/v2/internal/version.go | 2 +-
 vendor/github.com/grafana/regexp/README.md | 7 -
 vendor/github.com/grafana/regexp/backtrack.go | 26 +-
 vendor/github.com/grafana/regexp/exec.go | 71 +-
 vendor/github.com/grafana/regexp/onepass.go | 26 +-
 vendor/github.com/grafana/regexp/regexp.go | 40 +-
 .../github.com/grafana/regexp/syntax/prog.go | 25 +-
 .../github.com/hashicorp/consul/api/LICENSE | 2 +
 .../github.com/hashicorp/consul/api/README.md | 56 +-
 vendor/github.com/hashicorp/consul/api/acl.go | 101 +-
 .../github.com/hashicorp/consul/api/agent.go | 37 +-
 vendor/github.com/hashicorp/consul/api/api.go | 53 +-
 .../hashicorp/consul/api/config_entry.go | 109 +-
 .../consul/api/config_entry_discoverychain.go | 29 +-
 .../consul/api/config_entry_exports.go | 7 +-
 .../consul/api/config_entry_gateways.go | 99 +
 .../api/config_entry_inline_certificate.go | 43 +
 .../hashicorp/consul/api/config_entry_mesh.go | 6 +
 .../consul/api/config_entry_routes.go | 242 +
 .../consul/api/config_entry_status.go | 57 +
 .../github.com/hashicorp/consul/api/debug.go | 9 +-
 .../github.com/hashicorp/consul/api/health.go | 1 +
 .../hashicorp/consul/api/operator_license.go | 7 +-
 .../hashicorp/consul/api/operator_raft.go | 25 +
 .../hashicorp/consul/api/operator_usage.go | 53 +
 .../hashicorp/consul/api/peering.go | 30 +-
 .../hashicorp/consul/api/prepared_query.go | 10 +-
 vendor/github.com/hashicorp/consul/api/txn.go | 1 +
 .../github.com/hashicorp/go-hclog/README.md | 12 +-
 .../hashicorp/go-hclog/colorize_unix.go | 4 +-
 .../hashicorp/go-hclog/colorize_windows.go | 9 +-
 .../github.com/hashicorp/go-hclog/global.go | 6 +-
 .../hashicorp/go-hclog/interceptlogger.go | 7 +-
 .../hashicorp/go-hclog/intlogger.go | 227 +-
 .../github.com/hashicorp/go-hclog/logger.go | 43 +-
 .../hashicorp/go-hclog/nulllogger.go | 2 +
 .../github.com/hashicorp/go-hclog/stdlog.go | 21 +-
 .../github.com/hashicorp/golang-lru/LICENSE | 2 +
 .../hashicorp/golang-lru/simplelru/lru.go | 6 +-
 .../golang-lru/simplelru/lru_interface.go | 5 +-
 .../hashicorp/serf/coordinate/client.go | 2 +-
 .../hashicorp/serf/coordinate/config.go | 7 +
 .../klauspost/compress/.gitattributes | 2 +
 .../github.com/klauspost/compress/.gitignore | 32 +
 .../klauspost/compress/.goreleaser.yml | 141 +
 .../github.com/klauspost/compress/README.md | 621 +
 .../klauspost/compress/compressible.go | 85 +
 .../klauspost/compress/fse/README.md | 79 +
 .../klauspost/compress/fse/bitreader.go | 122 +
 .../klauspost/compress/fse/bitwriter.go | 168 +
 .../klauspost/compress/fse/bytereader.go | 47 +
 .../klauspost/compress/fse/compress.go | 682 +
 .../klauspost/compress/fse/decompress.go | 376 +
 .../github.com/klauspost/compress/fse/fse.go | 144 +
 vendor/github.com/klauspost/compress/gen.sh | 4 +
 .../klauspost/compress/huff0/.gitignore | 1 +
 .../klauspost/compress/huff0/README.md | 89 +
 .../klauspost/compress/huff0/bitreader.go | 229 +
 .../klauspost/compress/huff0/bitwriter.go | 111 +
 .../klauspost/compress/huff0/bytereader.go | 44 +
 .../klauspost/compress/huff0/compress.go | 749 +
 .../klauspost/compress/huff0/decompress.go | 1167 +
 .../compress/huff0/decompress_amd64.go | 226 +
 .../compress/huff0/decompress_amd64.s | 830 +
 .../compress/huff0/decompress_generic.go | 299 +
 .../klauspost/compress/huff0/huff0.go | 337 +
 .../compress/internal/cpuinfo/cpuinfo.go | 34 +
 .../internal/cpuinfo/cpuinfo_amd64.go | 11 +
 .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 +
 .../compress/internal/snapref/LICENSE | 27 +
 .../compress/internal/snapref/decode.go | 264 +
 .../compress/internal/snapref/decode_other.go | 113 +
 .../compress/internal/snapref/encode.go | 289 +
 .../compress/internal/snapref/encode_other.go | 262 +
 .../compress/internal/snapref/snappy.go | 98 +
 .../klauspost/compress/s2/README.md | 469 +-
 .../klauspost/compress/s2/decode.go | 493 +-
 .../klauspost/compress/s2/decode_other.go | 47 +-
 .../github.com/klauspost/compress/s2/dict.go | 331 +
 .../klauspost/compress/s2/encode.go | 54 +-
 .../klauspost/compress/s2/encode_all.go | 595 +-
 .../klauspost/compress/s2/encode_amd64.go | 14 +-
 .../klauspost/compress/s2/encode_best.go | 225 +-
 .../klauspost/compress/s2/encode_better.go | 723 +-
 .../klauspost/compress/s2/encode_go.go | 422 +-
 .../compress/s2/encodeblock_amd64.go | 57 +-
 .../klauspost/compress/s2/encodeblock_amd64.s | 17904 ++++----
 .../klauspost/compress/s2/lz4convert.go | 585 +
 .../klauspost/compress/s2/lz4sconvert.go | 467 +
 vendor/github.com/klauspost/compress/s2sx.mod | 4 +
 vendor/github.com/klauspost/compress/s2sx.sum | 0
 .../klauspost/compress/zstd/README.md | 441 +
 .../klauspost/compress/zstd/bitreader.go | 140 +
 .../klauspost/compress/zstd/bitwriter.go | 113 +
 .../klauspost/compress/zstd/blockdec.go | 726 +
 .../klauspost/compress/zstd/blockenc.go | 871 +
 .../compress/zstd/blocktype_string.go | 85 +
 .../klauspost/compress/zstd/bytebuf.go | 131 +
 .../klauspost/compress/zstd/bytereader.go | 82 +
 .../klauspost/compress/zstd/decodeheader.go | 229 +
 .../klauspost/compress/zstd/decoder.go | 953 +
 .../compress/zstd/decoder_options.go | 169 +
 .../klauspost/compress/zstd/dict.go | 161 +
 .../klauspost/compress/zstd/enc_base.go | 172 +
 .../klauspost/compress/zstd/enc_best.go | 555 +
 .../klauspost/compress/zstd/enc_better.go | 1242 +
 .../klauspost/compress/zstd/enc_dfast.go | 1123 +
 .../klauspost/compress/zstd/enc_fast.go | 898 +
 .../klauspost/compress/zstd/encoder.go | 676 +
 .../compress/zstd/encoder_options.go | 339 +
 .../klauspost/compress/zstd/framedec.go | 432 +
 .../klauspost/compress/zstd/frameenc.go | 137 +
 .../klauspost/compress/zstd/fse_decoder.go | 307 +
 .../compress/zstd/fse_decoder_amd64.go | 65 +
 .../compress/zstd/fse_decoder_amd64.s | 126 +
 .../compress/zstd/fse_decoder_generic.go | 72 +
 .../klauspost/compress/zstd/fse_encoder.go | 701 +
 .../klauspost/compress/zstd/fse_predefined.go | 158 +
 .../klauspost/compress/zstd/hash.go | 35 +
 .../klauspost/compress/zstd/history.go | 116 +
 .../compress/zstd/internal/xxhash/LICENSE.txt | 22 +
 .../compress/zstd/internal/xxhash/README.md | 71 +
 .../compress/zstd/internal/xxhash/xxhash.go | 230 +
 .../zstd/internal/xxhash/xxhash_amd64.s | 210 +
 .../zstd/internal/xxhash/xxhash_arm64.s | 184 +
 .../zstd/internal/xxhash/xxhash_asm.go | 16 +
 .../zstd/internal/xxhash/xxhash_other.go | 76 +
 .../zstd/internal/xxhash/xxhash_safe.go | 11 +
 .../klauspost/compress/zstd/seqdec.go | 505 +
 .../klauspost/compress/zstd/seqdec_amd64.go | 378 +
 .../klauspost/compress/zstd/seqdec_amd64.s | 4079 ++
 .../klauspost/compress/zstd/seqdec_generic.go | 237 +
 .../klauspost/compress/zstd/seqenc.go | 114 +
 .../klauspost/compress/zstd/snappy.go | 435 +
 .../github.com/klauspost/compress/zstd/zip.go | 141 +
 .../klauspost/compress/zstd/zstd.go | 143 +
 .../github.com/klauspost/cpuid/v2/README.md | 229 +-
 vendor/github.com/klauspost/cpuid/v2/cpuid.go | 213 +-
 .../klauspost/cpuid/v2/detect_x86.go | 2 +-
 .../klauspost/cpuid/v2/featureid_string.go | 361 +-
 .../klauspost/cpuid/v2/os_darwin_arm64.go | 2 +-
 .../github.com/mattn/go-isatty/isatty_bsd.go | 4 +-
 vendor/github.com/miekg/dns/LICENSE | 49 +-
 vendor/github.com/miekg/dns/README.md | 6 +
 vendor/github.com/miekg/dns/acceptfunc.go | 1 -
 vendor/github.com/miekg/dns/client.go | 5 +-
 vendor/github.com/miekg/dns/doc.go | 86 +-
 vendor/github.com/miekg/dns/edns.go | 9 +-
 vendor/github.com/miekg/dns/fuzz.go | 1 +
 vendor/github.com/miekg/dns/labels.go | 2 +-
 .../miekg/dns/listen_no_reuseport.go | 1 +
 .../github.com/miekg/dns/listen_reuseport.go | 1 +
 vendor/github.com/miekg/dns/msg.go | 4 +-
 vendor/github.com/miekg/dns/msg_helpers.go | 34 +
 vendor/github.com/miekg/dns/scan_rr.go | 112 +
 vendor/github.com/miekg/dns/server.go | 2 +-
 vendor/github.com/miekg/dns/svcb.go | 16 +-
 vendor/github.com/miekg/dns/tools.go | 1 +
 vendor/github.com/miekg/dns/types.go | 81 +
 vendor/github.com/miekg/dns/udp.go | 1 +
 vendor/github.com/miekg/dns/udp_windows.go | 1 +
 vendor/github.com/miekg/dns/version.go | 2 +-
 vendor/github.com/miekg/dns/xfr.go | 1 -
 vendor/github.com/miekg/dns/zduplicate.go | 58 +
 vendor/github.com/miekg/dns/zmsg.go | 110 +
 vendor/github.com/miekg/dns/ztypes.go | 42 +
 .../github.com/minio/minio-go/v7/.gitignore | 4 +-
 .../minio/minio-go/v7/.golangci.yml | 4 +-
 vendor/github.com/minio/minio-go/v7/Makefile | 7 +-
 vendor/github.com/minio/minio-go/v7/README.md | 70 +-
 .../minio/minio-go/v7/README_zh_CN.md | 70 +-
 .../minio/minio-go/v7/api-bucket-lifecycle.go | 4 +-
 .../minio/minio-go/v7/api-bucket-policy.go | 4 +-
 .../minio-go/v7/api-bucket-replication.go | 4 +-
 .../minio/minio-go/v7/api-bucket-tagging.go | 3 +-
 .../minio/minio-go/v7/api-compose-object.go | 7 +-
 .../minio/minio-go/v7/api-copy-object.go | 3 +-
 .../minio/minio-go/v7/api-datatypes.go | 20 +-
 .../minio/minio-go/v7/api-error-response.go | 39 +-
 .../minio/minio-go/v7/api-get-object.go | 12 +-
 .../minio/minio-go/v7/api-get-options.go | 57 +-
 .../github.com/minio/minio-go/v7/api-list.go | 100 +-
 .../minio-go/v7/api-put-object-common.go | 7 +-
 .../minio-go/v7/api-put-object-multipart.go | 83 +-
 .../minio-go/v7/api-put-object-streaming.go | 307 +-
 .../minio/minio-go/v7/api-put-object.go | 46 +-
 .../minio-go/v7/api-putobject-snowball.go | 8 +-
 .../minio/minio-go/v7/api-remove.go | 6 +-
 .../minio/minio-go/v7/api-s3-datatypes.go | 10 +-
 .../minio/minio-go/v7/api-select.go | 32 +-
 .../github.com/minio/minio-go/v7/api-stat.go | 9 +-
 vendor/github.com/minio/minio-go/v7/api.go | 65 +-
 .../github.com/minio/minio-go/v7/constants.go | 11 +-
 vendor/github.com/minio/minio-go/v7/core.go | 15 +-
 .../minio/minio-go/v7/functional_tests.go | 156 +-
 .../v7/pkg/credentials/assume_role.go | 7 +-
 .../minio-go/v7/pkg/credentials/chain.go | 21 +-
 .../v7/pkg/credentials/credentials.go | 9 +-
 .../v7/pkg/credentials/credentials.json | 7 +
 .../v7/pkg/credentials/credentials.sample | 3 +
 .../minio/minio-go/v7/pkg/credentials/doc.go | 34 +-
 .../v7/pkg/credentials/error_response.go | 3 +-
 .../pkg/credentials/file_aws_credentials.go | 48 +-
 .../v7/pkg/credentials/file_minio_client.go | 9 +-
 .../minio-go/v7/pkg/credentials/iam_aws.go | 6 +-
 .../v7/pkg/credentials/sts_client_grants.go | 4 +-
 .../v7/pkg/credentials/sts_ldap_identity.go | 4 +-
 .../v7/pkg/credentials/sts_tls_identity.go | 6 +-
 .../v7/pkg/credentials/sts_web_identity.go | 4 +-
 .../minio-go/v7/pkg/encrypt/fips_disabled.go | 24 +
 .../minio-go/v7/pkg/encrypt/fips_enabled.go | 24 +
 .../minio-go/v7/pkg/encrypt/server-side.go | 6 +-
 .../minio-go/v7/pkg/notification/info.go | 6 +-
 .../v7/pkg/notification/notification.go | 51 +-
 .../v7/pkg/replication/replication.go | 4 +
 ...st-signature-streaming-unsigned-trailer.go | 224 +
 .../pkg/signer/request-signature-streaming.go | 141 +-
 .../v7/pkg/signer/request-signature-v2.go | 27 +-
 .../v7/pkg/signer/request-signature-v4.go | 41 +-
 .../minio/minio-go/v7/pkg/signer/utils.go | 3 +-
 .../minio/minio-go/v7/pkg/tags/tags.go | 101 +-
 .../minio/minio-go/v7/post-policy.go | 27 +-
 .../minio/minio-go/v7/s3-endpoints.go | 4 +
 .../github.com/minio/minio-go/v7/transport.go | 3 +-
 vendor/github.com/minio/minio-go/v7/utils.go | 60 +-
 vendor/github.com/oklog/run/.gitignore | 14 +
 vendor/github.com/oklog/run/LICENSE | 201 +
 vendor/github.com/oklog/run/README.md | 75 +
 vendor/github.com/oklog/run/actors.go | 38 +
 vendor/github.com/oklog/run/group.go | 62 +
 vendor/github.com/pkg/browser/browser.go | 10 +-
 .../github.com/pkg/browser/browser_darwin.go | 4 -
 .../github.com/pkg/browser/browser_freebsd.go | 2 -
 .../github.com/pkg/browser/browser_linux.go | 2 -
 .../github.com/pkg/browser/browser_netbsd.go | 14 +
 .../github.com/pkg/browser/browser_openbsd.go | 2 -
 .../pkg/browser/browser_unsupported.go | 5 +-
 .../github.com/pkg/browser/browser_windows.go | 10 +-
 .../pkg/browser/zbrowser_windows.go | 76 -
 .../alertmanager/asset/assets_vfsdata.go | 16 +-
 .../prometheus/alertmanager/config/config.go | 94 +-
 .../alertmanager/config/coordinator.go | 2 +-
 .../alertmanager/config/notifiers.go | 167 +-
 .../alertmanager/pkg/labels/parse.go | 13 +-
 .../alertmanager/template/default.tmpl | 25 +-
 .../alertmanager/template/email.html | 6 +-
 .../alertmanager/template/email.tmpl | 8 +-
 .../alertmanager/template/template.go | 8 +-
 .../alertmanager/timeinterval/timeinterval.go | 70 +-
 .../prometheus/alertmanager/types/types.go | 31 +-
 .../client_golang/api/prometheus/v1/api.go | 17 +-
 .../client_golang/prometheus/counter.go | 11 +-
 .../client_golang/prometheus/doc.go | 107 +-
 .../client_golang/prometheus/gauge.go | 6 +-
 .../client_golang/prometheus/histogram.go | 978 +-
 .../prometheus/internal/almost_equal.go | 60 +
 .../prometheus/internal/difflib.go | 13 +-
 .../client_golang/prometheus/labels.go | 3 +-
 .../client_golang/prometheus/metric.go | 2 +-
 .../client_golang/prometheus/promauto/auto.go | 170 +-
 .../prometheus/promhttp/instrument_client.go | 5 +-
 .../prometheus/promhttp/instrument_server.go | 24 +-
 .../client_golang/prometheus/push/push.go | 20 +-
 .../client_golang/prometheus/registry.go | 34 +-
 .../client_golang/prometheus/summary.go | 9 +-
 .../client_golang/prometheus/timer.go | 11 +-
 .../prometheus/client_model/go/metrics.pb.go | 333 +-
 .../prometheus/common/config/config.go | 26 +-
 .../prometheus/common/config/http_config.go | 275 +-
 .../prometheus/common/expfmt/decode.go | 36 +-
 .../prometheus/common/expfmt/fuzz.go | 4 +-
 .../common/expfmt/openmetrics_create.go | 22 +-
 .../prometheus/common/expfmt/text_create.go | 3 +-
 .../prometheus/common/expfmt/text_parse.go | 10 +-
 .../bitbucket.org/ww/goautoneg/autoneg.go | 22 +-
 .../prometheus/common/model/time.go | 89 +-
 .../prometheus/common/model/value.go | 246 +-
 .../prometheus/common/model/value_float.go | 100 +
 .../common/model/value_histogram.go | 178 +
 .../prometheus/common/model/value_type.go | 83 +
 .../prometheus/common/version/info.go | 21 +-
 .../prometheus/common/version/info_default.go | 25 +
 .../prometheus/common/version/info_go118.go | 67 +
 .../exporter-toolkit/web/handler.go | 10 +-
 .../exporter-toolkit/web/landing_page.css | 13 +
 .../exporter-toolkit/web/landing_page.go | 82 +
 .../exporter-toolkit/web/landing_page.html | 24 +
 .../exporter-toolkit/web/tls_config.go | 148 +-
 .../exporter-toolkit/web/web-config.yml | 1 -
 .../prometheus/procfs/Makefile.common | 9 +-
 .../github.com/prometheus/procfs/cpuinfo.go | 36 +
 .../prometheus/procfs/cpuinfo_loong64.go | 19 +
 .../prometheus/procfs/cpuinfo_others.go | 4 +-
 vendor/github.com/prometheus/procfs/doc.go | 51 +-
 .../prometheus/procfs/mountstats.go | 3 +-
 .../prometheus/procfs/net_softnet.go | 70 +-
 .../github.com/prometheus/procfs/netstat.go | 53 +-
 .../prometheus/procfs/proc_cgroup.go | 2 +-
 .../prometheus/procfs/proc_interrupts.go | 98 +
 .../prometheus/procfs/proc_netstat.go | 491 +-
 .../github.com/prometheus/procfs/proc_snmp.go | 318 +-
 .../prometheus/procfs/proc_snmp6.go | 364 +-
 .../github.com/prometheus/procfs/proc_stat.go | 4 +-
 .../prometheus/procfs/proc_status.go | 6 +-
 vendor/github.com/prometheus/procfs/stat.go | 22 +-
 vendor/github.com/prometheus/procfs/thread.go | 79 +
 vendor/github.com/prometheus/procfs/vm.go | 4 +-
 .../prometheus/prometheus/config/config.go | 165 +-
 .../prometheus/discovery/file/file.go | 16 +-
 .../prometheus/discovery/manager.go | 4 +-
 .../model/histogram/float_histogram.go | 943 +
 .../prometheus/model/histogram/generic.go | 548 +
 .../prometheus/model/histogram/histogram.go | 450 +
 .../prometheus/model/labels/labels.go | 163 +-
 .../prometheus/model/labels/labels_string.go | 825 +
 .../prometheus/model/labels/test_utils.go | 9 +-
 .../prometheus/model/relabel/relabel.go | 93 +-
 .../prometheus/model/rulefmt/rulefmt.go | 32 +-
 .../prometheus/model/textparse/interface.go | 35 +-
 .../model/textparse/openmetricsparse.go | 112 +-
 .../prometheus/model/textparse/promparse.go | 66 +-
 .../model/textparse/protobufparse.go | 538 +
 .../prometheus/prometheus/prompb/buf.yaml | 3 +
 .../prometheus/prometheus/prompb/custom.go | 17 +
 .../prompb/io/prometheus/client/metrics.pb.go | 3997 +
 .../prompb/io/prometheus/client/metrics.proto | 147 +
 .../prometheus/prometheus/prompb/remote.pb.go | 6 +-
 .../prometheus/prometheus/prompb/remote.proto | 6 +-
 .../prometheus/prometheus/prompb/types.pb.go | 1646 +-
 .../prometheus/prometheus/prompb/types.proto | 78 +-
 .../prometheus/prometheus/promql/engine.go | 307 +-
 .../prometheus/prometheus/promql/functions.go | 232 +-
 .../prometheus/promql/parser/functions.go | 15 +
 .../promql/parser/generated_parser.y | 10 +-
 .../promql/parser/generated_parser.y.go | 29 +-
 .../prometheus/promql/parser/parse.go | 2 +-
 .../prometheus/prometheus/promql/quantile.go | 171 +
 .../prometheus/prometheus/promql/test.go | 22 +-
 .../prometheus/prometheus/promql/value.go | 151 +-
 .../prometheus/prometheus/scrape/manager.go | 22 +-
 .../prometheus/prometheus/scrape/scrape.go | 226 +-
 .../prometheus/prometheus/scrape/target.go | 104 +-
 .../prometheus/prometheus/storage/buffer.go | 187 +-
 .../prometheus/prometheus/storage/fanout.go | 15 +
 .../prometheus/storage/interface.go | 53 +-
 .../prometheus/storage/memoized_iterator.go | 94 +-
 .../prometheus/prometheus/storage/merge.go | 172 +-
 .../prometheus/storage/remote/client.go | 14 +-
 .../prometheus/storage/remote/codec.go | 231 +-
 .../storage/remote/queue_manager.go | 438 +-
 .../prometheus/storage/remote/read.go | 37 +-
 .../prometheus/storage/remote/read_handler.go | 4 +
 .../prometheus/storage/remote/write.go | 30 +-
 .../storage/remote/write_handler.go | 38 +-
 .../prometheus/prometheus/storage/series.go | 209 +-
 .../prometheus/prometheus/tsdb/README.md | 2 +-
 .../prometheus/prometheus/tsdb/block.go | 20 +-
 .../prometheus/prometheus/tsdb/blockwriter.go | 1 +
 .../prometheus/tsdb/chunkenc/chunk.go | 212 +-
 .../tsdb/chunkenc/float_histogram.go | 831 +
 .../prometheus/tsdb/chunkenc/histogram.go | 944 +
 .../tsdb/chunkenc/histogram_meta.go | 489 +
 .../prometheus/tsdb/chunkenc/varbit.go | 232 +
 .../prometheus/tsdb/chunkenc/xor.go | 252 +-
 .../tsdb/chunks/chunk_write_queue.go | 5 +-
 .../prometheus/tsdb/chunks/head_chunks.go | 59 +-
 .../prometheus/prometheus/tsdb/compact.go | 12 +-
 .../prometheus/prometheus/tsdb/db.go | 80 +-
 .../prometheus/tsdb/encoding/encoding.go | 7 +-
 .../prometheus/prometheus/tsdb/exemplar.go | 5 +-
 .../prometheus/prometheus/tsdb/head.go | 278 +-
 .../prometheus/prometheus/tsdb/head_append.go | 703 +-
 .../prometheus/prometheus/tsdb/head_read.go | 206 +-
 .../prometheus/prometheus/tsdb/head_wal.go | 317 +-
 .../prometheus/prometheus/tsdb/index/index.go | 104 +-
 .../prometheus/tsdb/index/postings.go | 25 +-
 .../prometheus/prometheus/tsdb/ooo_head.go | 14 +-
 .../prometheus/tsdb/ooo_head_read.go | 64 +-
 .../prometheus/prometheus/tsdb/querier.go | 493 +-
 .../prometheus/tsdb/record/record.go | 341 +-
 .../prometheus/prometheus/tsdb/test.txt | 1 +
 .../prometheus/tsdb/tsdbblockutil.go | 34 +-
 .../prometheus/tsdb/tsdbutil/chunks.go | 73 +-
 .../prometheus/tsdb/tsdbutil/histogram.go | 110 +
 .../prometheus/prometheus/tsdb/wal.go | 10 +-
 .../tsdb/{wal => wlog}/checkpoint.go | 47 +-
 .../tsdb/{wal => wlog}/live_reader.go | 4 +-
 .../prometheus/tsdb/{wal => wlog}/reader.go | 4 +-
 .../prometheus/tsdb/{wal => wlog}/watcher.go | 104 +-
 .../tsdb/{wal/wal.go => wlog/wlog.go} | 103 +-
 .../prometheus/util/strutil/strconv.go | 21 +
 .../prometheus/util/teststorage/storage.go | 1 +
 vendor/github.com/rueian/rueidis/.gitignore | 3 +
 .../rueian/rueidis/.goreleaser.yaml | 8 +
 vendor/github.com/rueian/rueidis/LICENSE | 201 +
 vendor/github.com/rueian/rueidis/NOTICE | 2 +
 vendor/github.com/rueian/rueidis/README.md | 389 +
 vendor/github.com/rueian/rueidis/binary.go | 26 +
 vendor/github.com/rueian/rueidis/client.go | 250 +
 vendor/github.com/rueian/rueidis/cluster.go | 764 +
 vendor/github.com/rueian/rueidis/cmds.go | 19 +
 vendor/github.com/rueian/rueidis/codecov.yml | 4 +
 .../rueian/rueidis/docker-compose.yml | 93 +
 .../github.com/rueian/rueidis/dockertest.sh | 22 +
 vendor/github.com/rueian/rueidis/helper.go | 70 +
 .../rueian/rueidis/internal/cmds/builder.go | 119 +
 .../rueian/rueidis/internal/cmds/cmds.go | 263 +
 .../rueian/rueidis/internal/cmds/gen.go | 34465 ++++++++++++++++
 .../rueian/rueidis/internal/cmds/slot.go | 109 +
 .../rueian/rueidis/internal/util/parallel.go | 45 +
 vendor/github.com/rueian/rueidis/lru.go | 260 +
 vendor/github.com/rueian/rueidis/lua.go | 85 +
 vendor/github.com/rueian/rueidis/message.go | 929 +
 vendor/github.com/rueian/rueidis/mux.go | 319 +
 vendor/github.com/rueian/rueidis/pipe.go | 1228 +
 vendor/github.com/rueian/rueidis/pool.go | 67 +
 vendor/github.com/rueian/rueidis/pubsub.go | 134 +
 vendor/github.com/rueian/rueidis/resp.go | 255 +
 vendor/github.com/rueian/rueidis/ring.go | 136 +
 vendor/github.com/rueian/rueidis/rueidis.go | 309 +
 vendor/github.com/rueian/rueidis/sentinel.go | 353 +
 .../github.com/rueian/rueidis/singleflight.go | 39 +
 vendor/github.com/stretchr/objx/Taskfile.yml | 2 +-
 .../stretchr/testify/assert/assertions.go | 78 +-
 .../github.com/stretchr/testify/mock/mock.go | 12 +-
 .../thanos-io/objstore/CHANGELOG.md | 8 +
 .../github.com/thanos-io/objstore/README.md | 13 +-
 .../thanos-io/objstore/exthttp/tlsconfig.go | 4 +-
 vendor/github.com/thanos-io/objstore/inmem.go | 25 +-
 .../github.com/thanos-io/objstore/objstore.go | 4 +-
 .../objstore/providers/azure/azure.go | 170 +-
 .../objstore/providers/azure/helpers.go | 33 +-
 .../providers/filesystem/filesystem.go | 39 +-
 .../thanos-io/objstore/providers/s3/s3.go | 16 +-
 .../objstore/providers/swift/swift.go | 121 +-
 .../github.com/thanos-io/objstore/testing.go | 11 +-
 .../thanos-io/objstore/tlsconfig.go | 4 +-
 .../thanos-io/objstore/tracing/tracing.go | 5 +
 .../thanos-io/thanos/pkg/block/block.go | 50 +
 .../thanos-io/thanos/pkg/block/fetcher.go | 11 +-
 .../thanos-io/thanos/pkg/block/index.go | 12 +-
 .../pkg/block/indexheader/binary_reader.go | 450 +-
 .../block/indexheader/lazy_binary_reader.go | 39 +-
 .../thanos/pkg/block/metadata/markers.go | 31 +-
 .../thanos/pkg/cacheutil/memcached_client.go | 1 +
 .../thanos/pkg/cacheutil/redis_client.go | 162 +-
 .../pkg/compact/downsample/downsample.go | 195 +-
 .../thanos-io/thanos/pkg/dedup/chunk_iter.go | 14 +-
 .../thanos-io/thanos/pkg/dedup/iter.go | 321 +-
 .../thanos/pkg/dedup/pushdown_iter.go | 53 +-
 .../thanos-io/thanos/pkg/extkingpin/app.go | 148 +
 .../thanos-io/thanos/pkg/extkingpin/flags.go | 125 +
 .../pkg/extkingpin/path_content_reloader.go | 128 +
 .../pkg/extprom/http/instrument_server.go | 4 +-
 .../thanos-io/thanos/pkg/extprom/testing.go | 2 +-
 .../thanos-io/thanos/pkg/gate/gate.go | 156 +-
 .../thanos-io/thanos/pkg/httpconfig/http.go | 4 +-
 .../thanos-io/thanos/pkg/store/bucket.go | 643 +-
 .../thanos/pkg/store/labelpb/label.go | 35 +-
 .../thanos-io/thanos/pkg/store/limiter.go | 86 +-
 .../thanos-io/thanos/pkg/store/local.go | 3 +-
 .../thanos-io/thanos/pkg/store/prometheus.go | 37 +-
 .../thanos-io/thanos/pkg/store/proxy.go | 32 +-
 .../thanos-io/thanos/pkg/store/proxy_heap.go | 270 +-
 .../thanos-io/thanos/pkg/store/recover.go | 52 +
 .../thanos/pkg/store/storepb/custom.go | 11 +
 .../thanos/pkg/store/storepb/inprocess.go | 8 +
 .../pkg/store/storepb/prompb/samples.go | 93 +-
 .../pkg/store/storepb/prompb/types.pb.go | 1652 +-
 .../pkg/store/storepb/prompb/types.proto | 68 +
 .../thanos/pkg/store/storepb/rpc.pb.go | 234 +-
 .../thanos/pkg/store/storepb/rpc.proto | 21 +-
 .../thanos/pkg/store/storepb/types.pb.go | 78 +-
 .../thanos/pkg/store/storepb/types.proto | 1 +
 .../thanos-io/thanos/pkg/store/telemetry.go | 58 +
 .../thanos-io/thanos/pkg/store/tsdb.go | 12 +-
 .../thanos/pkg/testutil/testorbench.go | 85 -
 .../thanos-io/thanos/pkg/testutil/testutil.go | 365 -
 .../thanos-io/thanos/pkg/tls/options.go | 16 +-
 .../thanos-io/thanos/pkg/tracing/testutil.go | 2 +-
 .../weaveworks/common/httpgrpc/httpgrpc.pb.go | 43 +-
 .../weaveworks/common/httpgrpc/httpgrpc.proto | 11 +-
 .../common/httpgrpc/server/server.go | 4 +-
 .../weaveworks/common/httpgrpc/tools.go | 6 +
 .../common/instrument/instrument.go | 9 +-
 .../weaveworks/common/middleware/logging.go | 2 +-
 .../weaveworks/common/middleware/response.go | 52 +-
 .../weaveworks/common/server/server.go | 80 +-
 .../weaveworks/common/server/tls_config.go | 55 +
 .../go.etcd.io/etcd/api/v3/version/version.go | 2 +-
 .../etcd/client/pkg/v3/fileutil/filereader.go | 60 +
 .../etcd/client/pkg/v3/fileutil/fileutil.go | 10 +-
 .../etcd/client/pkg/v3/fileutil/purge.go | 10 +-
 .../client/pkg/v3/tlsutil/cipher_suites.go | 19 +-
 .../pkg/v3/transport/keepalive_listener.go | 65 +-
 .../client/pkg/v3/transport/limit_listen.go | 6 +
 .../etcd/client/pkg/v3/transport/listener.go | 21 +-
 .../pkg/v3/transport/sockopt_solaris.go | 35 +
 .../client/pkg/v3/transport/sockopt_unix.go | 18 +-
 vendor/go.etcd.io/etcd/client/v3/client.go | 4 +-
 vendor/go.etcd.io/etcd/client/v3/lease.go | 13 +-
 vendor/go.etcd.io/etcd/client/v3/logger.go | 4 +-
 .../go.etcd.io/etcd/client/v3/maintenance.go | 1 +
 vendor/go.etcd.io/etcd/client/v3/op.go | 2 +-
 .../etcd/client/v3/retry_interceptor.go | 31 +-
 vendor/go.etcd.io/etcd/client/v3/watch.go | 50 +-
 vendor/go.opencensus.io/Makefile | 8 +-
 vendor/go.opencensus.io/opencensus.go | 2 +-
 .../plugin/ocgrpc/client_metrics.go | 9 +
 .../plugin/ocgrpc/server_metrics.go | 9 +
 .../plugin/ocgrpc/stats_common.go | 23 +-
 .../go.opencensus.io/plugin/ochttp/server.go | 8 +-
 vendor/go.opencensus.io/stats/doc.go | 7 +-
 .../go.opencensus.io/stats/internal/record.go | 6 +
 vendor/go.opencensus.io/stats/record.go | 21 +-
 .../stats/view/aggregation.go | 6 +-
 .../go.opencensus.io/stats/view/collector.go | 9 +-
 vendor/go.opencensus.io/stats/view/doc.go | 2 +-
 vendor/go.opencensus.io/stats/view/worker.go | 27 +-
 vendor/go.opencensus.io/tag/profile_19.go | 1 +
 vendor/go.opencensus.io/tag/profile_not19.go | 1 +
 vendor/go.opencensus.io/trace/doc.go | 13 +-
 vendor/go.opencensus.io/trace/lrumap.go | 2 +-
 vendor/go.opencensus.io/trace/trace_go11.go | 1 +
 .../go.opencensus.io/trace/trace_nongo11.go | 1 +
 .../net/http/otelhttp/config.go | 9 +
 .../net/http/otelhttp/handler.go | 65 +-
 .../net/http/otelhttp/transport.go | 8 +-
 .../net/http/otelhttp/version.go | 2 +-
 .../propagators/aws/xray/idgenerator.go | 3 +-
 .../contrib/propagators/b3/b3_config.go | 4 +-
 .../contrib/propagators/b3/b3_propagator.go | 13 +-
 .../contrib/propagators/b3/version.go | 2 +-
 .../propagators/jaeger/jaeger_propagator.go | 19 +-
 .../contrib/propagators/jaeger/version.go | 2 +-
 .../contrib/propagators/ot/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/.golangci.yml | 6 +-
 vendor/go.opentelemetry.io/otel/.lycheeignore | 3 +
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 320 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 2 +-
 vendor/go.opentelemetry.io/otel/Makefile | 13 +-
 vendor/go.opentelemetry.io/otel/README.md | 5 +
 vendor/go.opentelemetry.io/otel/RELEASING.md | 11 +-
 .../otel/attribute/value.go | 95 +-
 .../otel/baggage/baggage.go | 16 +-
 .../otel/bridge/opentracing/provider.go | 71 +
 .../otel/bridge/opentracing/wrapper.go | 9 +-
 .../go.opentelemetry.io/otel/codes/codes.go | 10 +
 vendor/go.opentelemetry.io/otel/handler.go | 25 +-
 .../otel/internal/attribute/attribute.go | 111 +
 .../otel/internal/global/internal_logging.go | 30 +-
 .../go.opentelemetry.io/otel/metric/config.go | 25 +-
 .../otel/metric/global/global.go | 2 +-
 .../otel/metric/instrument/asyncfloat64.go | 130 +
 .../instrument/asyncfloat64/asyncfloat64.go | 70 -
 .../otel/metric/instrument/asyncint64.go | 130 +
 .../instrument/asyncint64/asyncint64.go | 70 -
 .../otel/metric/instrument/config.go | 69 -
 .../otel/metric/instrument/instrument.go | 58 +
 .../otel/metric/instrument/syncfloat64.go | 85 +
 .../instrument/syncfloat64/syncfloat64.go | 56 -
 .../otel/metric/instrument/syncint64.go | 85 +
 .../metric/instrument/syncint64/syncint64.go | 56 -
 .../metric/internal/global/instruments.go | 207 +-
 .../otel/metric/internal/global/meter.go | 319 +-
 .../go.opentelemetry.io/otel/metric/meter.go | 130 +-
 .../go.opentelemetry.io/otel/metric/noop.go | 148 +-
 .../otel/sdk/resource/builtin.go | 8 +-
 .../otel/sdk/resource/config.go | 2 +
 .../otel/sdk/resource/container.go | 4 +-
 .../otel/sdk/resource/env.go | 15 +-
 .../otel/sdk/resource/os.go | 4 +-
 .../otel/sdk/resource/process.go | 18 +-
 .../otel/sdk/trace/provider.go | 56 +-
 .../otel/sdk/trace/sampling.go | 10 +-
 .../otel/sdk/trace/span.go | 46 +-
 .../otel/sdk/trace/span_processor.go | 5 +
 .../otel/sdk/trace/tracer.go | 2 +-
 .../otel/semconv/internal/http.go | 336 -
 .../otel/semconv/internal/v2/http.go | 404 +
 .../otel/semconv/internal/v2/net.go | 324 +
 .../otel/semconv/v1.12.0/http.go | 114 -
 .../otel/semconv/v1.12.0/resource.go | 1042 -
 .../otel/semconv/v1.12.0/trace.go | 1704 -
 .../otel/semconv/{v1.12.0 => v1.17.0}/doc.go | 4 +-
 .../otel/semconv/v1.17.0/event.go | 199 +
 .../semconv/{v1.12.0 => v1.17.0}/exception.go | 2 +-
 .../unit/unit.go => semconv/v1.17.0/http.go} | 14 +-
 .../otel/semconv/v1.17.0/httpconv/http.go | 150 +
 .../otel/semconv/v1.17.0/resource.go | 2010 +
 .../semconv/{v1.12.0 => v1.17.0}/schema.go | 4 +-
 .../otel/semconv/v1.17.0/trace.go | 3375 ++
 .../go.opentelemetry.io/otel/trace/config.go | 17 +
 vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +-
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 7 +-
 vendor/go.uber.org/goleak/CHANGELOG.md | 10 +-
 vendor/go.uber.org/goleak/Makefile | 10 +-
 vendor/go.uber.org/goleak/README.md | 5 +-
 .../go.uber.org/goleak/internal/stack/doc.go | 22 +
 vendor/go.uber.org/multierr/CHANGELOG.md | 8 +
 vendor/go.uber.org/multierr/error.go | 345 +-
 vendor/go4.org/intern/LICENSE | 29 +
 vendor/go4.org/intern/README.md | 19 +
 vendor/go4.org/intern/intern.go | 183 +
 .../unsafe/assume-no-moving-gc/LICENSE | 29 +
 .../unsafe/assume-no-moving-gc/README.md | 13 +
 .../assume-no-moving-gc.go | 34 +
 .../unsafe/assume-no-moving-gc/untested.go | 26 +
 vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 11 +-
 vendor/golang.org/x/crypto/cast5/cast5.go | 11 +-
 .../x/crypto/openpgp/armor/armor.go | 2 +-
 vendor/golang.org/x/crypto/openpgp/keys.go | 4 +-
 .../x/crypto/openpgp/packet/opaque.go | 3 +-
 .../x/crypto/openpgp/packet/private_key.go | 3 +-
 .../openpgp/packet/symmetrically_encrypted.go | 2 +-
 .../x/crypto/openpgp/packet/userattribute.go | 3 +-
 .../x/crypto/openpgp/packet/userid.go | 3 +-
 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 2 +-
 vendor/golang.org/x/crypto/openpgp/write.go | 2 +-
 vendor/golang.org/x/crypto/pkcs12/crypto.go | 2 +-
 .../x/crypto/pkcs12/internal/rc2/rc2.go | 53 +-
 .../redis/v8 => golang.org/x/exp}/LICENSE | 6 +-
 vendor/golang.org/x/exp/PATENTS | 22 +
 .../x/exp/constraints/constraints.go | 50 +
 vendor/golang.org/x/exp/slices/slices.go | 258 +
 vendor/golang.org/x/exp/slices/sort.go | 126 +
 vendor/golang.org/x/exp/slices/zsortfunc.go | 479 +
 .../golang.org/x/exp/slices/zsortordered.go | 481 +
 .../x/net/context/ctxhttp/ctxhttp.go | 71 -
 .../golang.org/x/net/http/httpproxy/proxy.go | 370 +
 vendor/golang.org/x/net/http2/flow.go | 88 +-
 vendor/golang.org/x/net/http2/frame.go | 11 +-
 vendor/golang.org/x/net/http2/headermap.go | 18 +
 vendor/golang.org/x/net/http2/hpack/encode.go | 5 +
 vendor/golang.org/x/net/http2/hpack/hpack.go | 81 +-
 .../x/net/http2/hpack/static_table.go | 188 +
 vendor/golang.org/x/net/http2/hpack/tables.go | 78 +-
 vendor/golang.org/x/net/http2/server.go | 337 +-
 vendor/golang.org/x/net/http2/transport.go | 246 +-
 .../x/net/internal/socket/mmsghdr_unix.go | 18 +-
 .../x/net/internal/socket/msghdr_linux.go | 3 -
 .../net/internal/socket/zsys_openbsd_ppc64.go | 30 +
 .../internal/socket/zsys_openbsd_riscv64.go | 30 +
 vendor/golang.org/x/net/ipv6/dgramopt.go | 2 +-
 vendor/golang.org/x/net/netutil/listen.go | 2 +-
 .../x/net/publicsuffix/data/children | Bin 0 -> 2876 bytes
 .../golang.org/x/net/publicsuffix/data/nodes | Bin 0 -> 48280 bytes
 .../golang.org/x/net/publicsuffix/data/text | 1 +
 vendor/golang.org/x/net/publicsuffix/list.go | 27 +-
 vendor/golang.org/x/net/publicsuffix/table.go | 10567 +----
 vendor/golang.org/x/net/trace/histogram.go | 2 +-
 vendor/golang.org/x/net/trace/trace.go | 2 +-
 vendor/golang.org/x/oauth2/google/default.go | 12 +-
 vendor/golang.org/x/oauth2/google/doc.go | 18 +-
 vendor/golang.org/x/oauth2/google/google.go | 9 +-
 .../google/internal/externalaccount/aws.go | 105 +-
 .../externalaccount/basecredentials.go | 34 +-
 vendor/golang.org/x/oauth2/internal/token.go | 4 +-
 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 1 +
 .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +-
 .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 15 +
 vendor/golang.org/x/sys/cpu/endian_big.go | 11 +
 vendor/golang.org/x/sys/cpu/endian_little.go | 11 +
 vendor/golang.org/x/sys/cpu/hwcap_linux.go | 15 +
 vendor/golang.org/x/sys/cpu/parse.go | 43 +
 .../x/sys/cpu/proc_cpuinfo_linux.go | 54 +
 vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 +
 .../x/sys/cpu/runtime_auxv_go121.go | 19 +
 vendor/golang.org/x/sys/execabs/execabs.go | 2 +-
 .../golang.org/x/sys/execabs/execabs_go118.go | 6 +
 .../golang.org/x/sys/execabs/execabs_go119.go | 12 +-
 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 31 +
 vendor/golang.org/x/sys/unix/dirent.go | 4 +-
 vendor/golang.org/x/sys/unix/gccgo.go | 4 +-
 vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +-
 vendor/golang.org/x/sys/unix/ioctl.go | 21 +-
 vendor/golang.org/x/sys/unix/ioctl_zos.go | 8 +-
 vendor/golang.org/x/sys/unix/mkall.sh | 22 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 4 +-
 vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 +
 vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 +
 vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 14 +
 vendor/golang.org/x/sys/unix/syscall_aix.go | 5 +-
 vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +-
 .../golang.org/x/sys/unix/syscall_darwin.go | 13 +-
 .../x/sys/unix/syscall_darwin_amd64.go | 1 +
 .../x/sys/unix/syscall_darwin_arm64.go | 1 +
 .../x/sys/unix/syscall_dragonfly.go | 2 +
 .../golang.org/x/sys/unix/syscall_freebsd.go | 44 +-
 .../x/sys/unix/syscall_freebsd_386.go | 12 +-
 .../x/sys/unix/syscall_freebsd_amd64.go | 12 +-
 .../x/sys/unix/syscall_freebsd_arm.go | 10 +-
 .../x/sys/unix/syscall_freebsd_arm64.go | 10 +-
 .../x/sys/unix/syscall_freebsd_riscv64.go | 10 +-
 vendor/golang.org/x/sys/unix/syscall_hurd.go | 30 +
 .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 +
 .../golang.org/x/sys/unix/syscall_illumos.go | 106 -
 vendor/golang.org/x/sys/unix/syscall_linux.go | 101 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go | 20 +-
 .../golang.org/x/sys/unix/syscall_openbsd.go | 2 +
 .../x/sys/unix/syscall_openbsd_libc.go | 4 +-
 .../x/sys/unix/syscall_openbsd_ppc64.go | 42 +
 .../x/sys/unix/syscall_openbsd_riscv64.go | 42 +
 .../golang.org/x/sys/unix/syscall_solaris.go | 120 +-
 vendor/golang.org/x/sys/unix/syscall_unix.go | 69 +-
 .../golang.org/x/sys/unix/syscall_unix_gc.go | 6 +-
 .../x/sys/unix/syscall_zos_s390x.go | 177 +-
 vendor/golang.org/x/sys/unix/timestruct.go | 2 +-
 vendor/golang.org/x/sys/unix/xattr_bsd.go | 9 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 40 +-
 .../x/sys/unix/zerrors_linux_386.go | 1 +
 .../x/sys/unix/zerrors_linux_amd64.go | 1 +
 .../x/sys/unix/zerrors_linux_arm.go | 1 +
 .../x/sys/unix/zerrors_linux_arm64.go | 1 +
 .../x/sys/unix/zerrors_linux_loong64.go | 1 +
 .../x/sys/unix/zerrors_linux_mips.go | 1 +
 .../x/sys/unix/zerrors_linux_mips64.go | 1 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 1 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 1 +
 .../x/sys/unix/zerrors_linux_ppc.go | 1 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 1 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 1 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 1 +
 .../x/sys/unix/zerrors_linux_s390x.go | 1 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 1 +
 .../x/sys/unix/zerrors_openbsd_386.go | 356 +-
 .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +-
 .../x/sys/unix/zerrors_openbsd_arm.go | 348 +-
 .../x/sys/unix/zerrors_openbsd_arm64.go | 160 +-
 .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +-
 .../x/sys/unix/zerrors_openbsd_ppc64.go | 1905 +
 .../x/sys/unix/zerrors_openbsd_riscv64.go | 1904 +
 .../x/sys/unix/zptrace_armnn_linux.go | 8 +-
 .../x/sys/unix/zptrace_linux_arm64.go | 4 +-
 .../x/sys/unix/zptrace_mipsnn_linux.go | 8 +-
 .../x/sys/unix/zptrace_mipsnnle_linux.go | 8 +-
 .../x/sys/unix/zptrace_x86_linux.go | 8 +-
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 10 +
 .../x/sys/unix/zsyscall_aix_ppc64.go | 10 +
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 7 +
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 8 +
 .../x/sys/unix/zsyscall_darwin_amd64.go | 16 +
 .../x/sys/unix/zsyscall_darwin_arm64.go | 16 +
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 20 +
 .../x/sys/unix/zsyscall_freebsd_386.go | 30 +
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 30 +
 .../x/sys/unix/zsyscall_freebsd_arm.go | 30 +
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 30 +
 .../x/sys/unix/zsyscall_freebsd_riscv64.go | 30 +
 .../x/sys/unix/zsyscall_illumos_amd64.go | 28 +-
 .../golang.org/x/sys/unix/zsyscall_linux.go | 31 +
 .../x/sys/unix/zsyscall_netbsd_386.go | 20 +
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 20 +
 .../x/sys/unix/zsyscall_netbsd_arm.go | 20 +
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 20 +
 .../x/sys/unix/zsyscall_openbsd_386.go | 22 +
 .../x/sys/unix/zsyscall_openbsd_386.s | 137 +-
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 22 +
 .../x/sys/unix/zsyscall_openbsd_amd64.s | 137 +-
 .../x/sys/unix/zsyscall_openbsd_arm.go | 22 +
 .../x/sys/unix/zsyscall_openbsd_arm.s | 137 +-
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 22 +
 .../x/sys/unix/zsyscall_openbsd_arm64.s | 137 +-
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 820 +-
 .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 +
 .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2243 +
 .../x/sys/unix/zsyscall_openbsd_ppc64.s | 802 +
 .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2243 +
 .../x/sys/unix/zsyscall_openbsd_riscv64.s | 669 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 52 +-
 .../x/sys/unix/zsyscall_zos_s390x.go | 10 +
 .../x/sys/unix/zsysctl_openbsd_386.go | 51 +-
 .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +-
 .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +-
 .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +-
 .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +-
 .../x/sys/unix/zsysctl_openbsd_ppc64.go | 281 +
 .../x/sys/unix/zsysctl_openbsd_riscv64.go | 282 +
 .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 +
 .../x/sys/unix/zsysnum_openbsd_ppc64.go | 218 +
 .../x/sys/unix/zsysnum_openbsd_riscv64.go | 219 +
 .../x/sys/unix/ztypes_freebsd_386.go | 2 +-
 .../x/sys/unix/ztypes_freebsd_amd64.go | 2 +-
 .../x/sys/unix/ztypes_freebsd_arm.go | 2 +-
 .../x/sys/unix/ztypes_freebsd_arm64.go | 2 +-
 .../x/sys/unix/ztypes_freebsd_riscv64.go | 2 +-
 .../x/sys/unix/ztypes_illumos_amd64.go | 42 -
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 349 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 8 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 8 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 8 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 8 +-
 .../x/sys/unix/ztypes_linux_loong64.go | 8 +-
 .../x/sys/unix/ztypes_linux_mips.go | 8 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 8 +-
 .../x/sys/unix/ztypes_linux_mips64le.go | 8 +-
 .../x/sys/unix/ztypes_linux_mipsle.go | 8 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 8 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 8 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go | 8 +-
 .../x/sys/unix/ztypes_linux_riscv64.go | 8 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 8 +-
 .../x/sys/unix/ztypes_linux_sparc64.go | 8 +-
 .../x/sys/unix/ztypes_netbsd_386.go | 84 +
 .../x/sys/unix/ztypes_netbsd_amd64.go | 84 +
.../x/sys/unix/ztypes_netbsd_arm.go | 84 +
.../x/sys/unix/ztypes_netbsd_arm64.go | 84 +
.../x/sys/unix/ztypes_openbsd_386.go | 97 +-
.../x/sys/unix/ztypes_openbsd_amd64.go | 33 +-
.../x/sys/unix/ztypes_openbsd_arm.go | 9 +-
.../x/sys/unix/ztypes_openbsd_arm64.go | 9 +-
.../x/sys/unix/ztypes_openbsd_mips64.go | 9 +-
.../x/sys/unix/ztypes_openbsd_ppc64.go | 571 +
.../x/sys/unix/ztypes_openbsd_riscv64.go | 571 +
.../x/sys/unix/ztypes_solaris_amd64.go | 35 +
.../golang.org/x/sys/unix/ztypes_zos_s390x.go | 11 +-
.../x/sys/windows/syscall_windows.go | 35 +-
.../golang.org/x/sys/windows/types_windows.go | 85 +
.../x/sys/windows/zsyscall_windows.go | 34 +
vendor/golang.org/x/text/AUTHORS | 3 -
vendor/golang.org/x/text/CONTRIBUTORS | 3 -
vendor/golang.org/x/text/cases/cases.go | 162 +
vendor/golang.org/x/text/cases/context.go | 376 +
vendor/golang.org/x/text/cases/fold.go | 34 +
vendor/golang.org/x/text/cases/icu.go | 62 +
vendor/golang.org/x/text/cases/info.go | 82 +
vendor/golang.org/x/text/cases/map.go | 816 +
.../golang.org/x/text/cases/tables10.0.0.go | 2256 +
.../golang.org/x/text/cases/tables11.0.0.go | 2317 ++
.../golang.org/x/text/cases/tables12.0.0.go | 2360 ++
.../golang.org/x/text/cases/tables13.0.0.go | 2400 ++
vendor/golang.org/x/text/cases/tables9.0.0.go | 2216 +
vendor/golang.org/x/text/cases/trieval.go | 217 +
vendor/golang.org/x/text/internal/internal.go | 49 +
.../x/text/internal/language/common.go | 16 +
.../x/text/internal/language/compact.go | 29 +
.../text/internal/language/compact/compact.go | 61 +
.../internal/language/compact/language.go | 260 +
.../text/internal/language/compact/parents.go | 120 +
.../text/internal/language/compact/tables.go | 1015 +
.../x/text/internal/language/compact/tags.go | 91 +
.../x/text/internal/language/compose.go | 167 +
.../x/text/internal/language/coverage.go | 28 +
.../x/text/internal/language/language.go | 627 +
.../x/text/internal/language/lookup.go | 412 +
.../x/text/internal/language/match.go | 226 +
.../x/text/internal/language/parse.go | 608 +
.../x/text/internal/language/tables.go | 3472 ++
.../x/text/internal/language/tags.go | 48 +
vendor/golang.org/x/text/internal/match.go | 67 +
vendor/golang.org/x/text/internal/tag/tag.go | 100 +
vendor/golang.org/x/text/language/coverage.go | 187 +
vendor/golang.org/x/text/language/doc.go | 98 +
vendor/golang.org/x/text/language/language.go | 605 +
vendor/golang.org/x/text/language/match.go | 735 +
vendor/golang.org/x/text/language/parse.go | 256 +
vendor/golang.org/x/text/language/tables.go | 298 +
vendor/golang.org/x/text/language/tags.go | 145 +
vendor/golang.org/x/text/unicode/bidi/core.go | 26 +-
.../golang.org/x/text/unicode/bidi/trieval.go | 12 -
.../x/text/unicode/norm/forminfo.go | 11 +-
.../x/text/unicode/norm/normalize.go | 11 +-
.../x/text/unicode/norm/tables13.0.0.go | 4 +-
vendor/golang.org/x/time/rate/rate.go | 87 +-
vendor/golang.org/x/time/rate/sometimes.go | 67 +
.../x/tools/go/gcexportdata/gcexportdata.go | 32 +-
.../go/internal/gcimporter/gcimporter.go | 1125 -
.../golang.org/x/tools/go/packages/golist.go | 22 +-
.../x/tools/go/packages/packages.go | 87 +-
.../x/tools/go/types/objectpath/objectpath.go | 762 +
.../{go => }/internal/gcimporter/bexport.go | 9 +-
.../{go => }/internal/gcimporter/bimport.go | 0
.../internal/gcimporter/exportdata.go | 0
.../x/tools/internal/gcimporter/gcimporter.go | 277 +
.../{go => }/internal/gcimporter/iexport.go | 198 +-
.../{go => }/internal/gcimporter/iimport.go | 112 +-
.../internal/gcimporter/newInterface10.go | 0
.../internal/gcimporter/newInterface11.go | 0
.../internal/gcimporter/support_go117.go | 0
.../internal/gcimporter/support_go118.go | 14 +
.../internal/gcimporter/unified_no.go | 0
.../internal/gcimporter/unified_yes.go | 0
.../internal/gcimporter/ureader_no.go | 0
.../internal/gcimporter/ureader_yes.go | 211 +-
.../x/tools/internal/gocommand/invoke.go | 83 +-
.../x/tools/internal/gocommand/version.go | 36 +-
.../tools/{go => }/internal/pkgbits/codes.go | 0
.../{go => }/internal/pkgbits/decoder.go | 108 +-
.../x/tools/{go => }/internal/pkgbits/doc.go | 0
.../{go => }/internal/pkgbits/encoder.go | 20 +-
.../tools/{go => }/internal/pkgbits/flags.go | 0
.../{go => }/internal/pkgbits/frames_go1.go | 0
.../{go => }/internal/pkgbits/frames_go17.go | 0
.../tools/{go => }/internal/pkgbits/reloc.go | 4 +-
.../{go => }/internal/pkgbits/support.go | 0
.../x/tools/{go => }/internal/pkgbits/sync.go | 0
.../internal/pkgbits/syncmarker_string.go | 0
.../internal/tokeninternal/tokeninternal.go | 59 +
.../x/tools/internal/typeparams/common.go | 1 -
.../tools/internal/typesinternal/errorcode.go | 38 +-
.../typesinternal/errorcode_string.go | 26 +-
.../x/tools/internal/typesinternal/types.go | 9 +
.../api/googleapi/googleapi.go | 17 +-
.../iamcredentials/v1/iamcredentials-gen.go | 26 +-
.../api/internal/gensupport/error.go | 24 +
.../api/internal/gensupport/json.go | 26 +-
.../api/internal/gensupport/media.go | 95 +-
.../api/internal/gensupport/resumable.go | 15 +-
.../api/internal/gensupport/send.go | 4 +-
.../google.golang.org/api/internal/version.go | 2 +-
.../option/internaloption/internaloption.go | 7 +
vendor/google.golang.org/api/option/option.go | 4 +-
.../api/storage/v1/storage-gen.go | 286 +-
.../api/transport/cert/enterprise_cert.go | 4 +-
.../api/transport/grpc/dial.go | 48 +-
.../api/transport/grpc/dial_socketopt.go | 6 +-
.../transport/http/configure_http2_go116.go | 26 -
.../http/configure_http2_not_go116.go | 17 -
.../api/transport/http/dial.go | 14 +-
.../googleapis/api/annotations/client.pb.go | 1559 +-
.../googleapis/api/launch_stage.pb.go | 203 +
.../genproto/googleapis/iam/v1/alias.go | 208 +
.../genproto/googleapis/rpc/code/code.pb.go | 27 +-
.../rpc/errdetails/error_details.pb.go | 374 +-
.../googleapis/rpc/status/status.pb.go | 10 +-
.../protobuf/encoding/protojson/doc.go | 2 +-
.../encoding/protojson/well_known_types.go | 12 +-
.../protobuf/encoding/protowire/wire.go | 8 +-
.../protobuf/internal/encoding/json/decode.go | 2 +-
.../protobuf/internal/encoding/text/decode.go | 5 +-
.../internal/encoding/text/decode_number.go | 43 +-
.../protobuf/internal/genid/descriptor_gen.go | 90 +-
.../protobuf/internal/impl/convert.go | 1 -
.../protobuf/internal/strs/strings_unsafe.go | 2 +-
.../protobuf/internal/version/version.go | 4 +-
.../google.golang.org/protobuf/proto/doc.go | 9 +-
.../google.golang.org/protobuf/proto/equal.go | 172 +-
.../reflect/protoreflect/source_gen.go | 14 +
.../protobuf/reflect/protoreflect/value.go | 2 +-
.../reflect/protoreflect/value_equal.go | 168 +
.../reflect/protoreflect/value_union.go | 4 +-
.../reflect/protoregistry/registry.go | 2 +-
.../types/descriptorpb/descriptor.pb.go | 1547 +-
.../protobuf/types/known/anypb/any.pb.go | 135 +-
.../types/known/durationpb/duration.pb.go | 63 +-
.../protobuf/types/known/emptypb/empty.pb.go | 8 +-
.../types/known/fieldmaskpb/field_mask.pb.go | 137 +-
.../types/known/timestamppb/timestamp.pb.go | 61 +-
vendor/gopkg.in/ini.v1/deprecated.go | 5 +-
vendor/gopkg.in/ini.v1/ini.go | 8 +-
vendor/modules.txt | 339 +-
1383 files changed, 229053 insertions(+), 73506 deletions(-)
delete mode 100644 vendor/cloud.google.com/go/.gitignore
delete mode 100644 vendor/cloud.google.com/go/.release-please-manifest-submodules.json
delete mode 100644 vendor/cloud.google.com/go/.release-please-manifest.json
delete mode 100644 vendor/cloud.google.com/go/CHANGES.md
delete mode 100644 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
delete mode 100644 vendor/cloud.google.com/go/CONTRIBUTING.md
delete mode 100644 vendor/cloud.google.com/go/README.md
delete mode 100644 vendor/cloud.google.com/go/RELEASING.md
delete mode 100644 vendor/cloud.google.com/go/SECURITY.md
rename vendor/{google.golang.org/genproto/protobuf/field_mask/field_mask.go => cloud.google.com/go/compute/internal/version.go} (60%)
create mode 100644 vendor/cloud.google.com/go/compute/metadata/CHANGES.md
create mode 100644 vendor/cloud.google.com/go/compute/metadata/LICENSE
create mode 100644 vendor/cloud.google.com/go/compute/metadata/README.md
rename vendor/cloud.google.com/go/{iam/go_mod_tidy_hack.go => compute/metadata/tidyfix.go} (85%)
delete mode 100644 vendor/cloud.google.com/go/doc.go
rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/iam_policy.pb.go (99%)
rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/options.pb.go (99%)
rename vendor/{google.golang.org/genproto/googleapis/iam/v1 => cloud.google.com/go/iam/apiv1/iampb}/policy.pb.go (94%)
create mode 100644 vendor/cloud.google.com/go/internal/cloudbuild.yaml
delete mode 100644 vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
delete mode 100644 vendor/cloud.google.com/go/release-please-config.json
delete mode 100644 vendor/cloud.google.com/go/storage/.release-please-manifest.json
delete mode 100644 vendor/cloud.google.com/go/storage/release-please-config.json
delete mode 100644 vendor/cloud.google.com/go/testing.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_retry_reader.go => blob/retry_reader.go} (57%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{ => blockblob}/chunkwriting.go (82%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_access_policy.go => internal/exported/access_policy.go} (88%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_shared_policy_shared_key_credential.go => internal/exported/shared_key_credential.go} (73%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_blob_client.go => internal/generated/zz_blob_client.go} (52%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_constants.go => internal/generated/zz_constants.go} (84%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_container_client.go => internal/generated/zz_container_client.go} (50%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_models.go => internal/generated/zz_models.go} (75%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_response_types.go => internal/generated/zz_response_types.go} (70%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_service_client.go => internal/generated/zz_service_client.go} (62%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_time_rfc1123.go => internal/generated/zz_time_rfc1123.go} (96%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_time_rfc3339.go => internal/generated/zz_time_rfc3339.go} (97%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zz_generated_xml_helper.go => internal/generated/zz_xml_helper.go} (96%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{ => internal/shared}/bytes_writer.go (74%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{ => internal/shared}/transfer_manager.go (96%)
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zm_page_blob_client_util.go => pageblob/models.go} (51%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_sas_service.go => sas/service.go} (51%)
rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{zc_parsing_urls.go => sas/url_parts.go} (68%)
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go
create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%)
create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go
rename vendor/{go.opentelemetry.io/otel/metric/unit/doc.go => github.com/coreos/go-systemd/v22/activation/files_windows.go} (61%)
create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/listeners.go
create mode 100644 vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/instrumentation.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/prometheus_chunk.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go
delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/util/string_utils.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go
create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/zstd/zstd.go
delete mode 100644 vendor/github.com/dgryski/go-rendezvous/LICENSE
delete mode 100644 vendor/github.com/dgryski/go-rendezvous/rdv.go
create mode 100644 vendor/github.com/efficientgo/core/COPYRIGHT
rename vendor/github.com/efficientgo/{tools => }/core/LICENSE (100%)
rename vendor/github.com/efficientgo/{tools/core/pkg => core}/errcapture/do.go (89%)
rename vendor/github.com/efficientgo/{tools/core/pkg => core}/errcapture/doc.go (71%)
create mode 100644 vendor/github.com/efficientgo/core/errors/doc.go
create mode 100644 vendor/github.com/efficientgo/core/errors/errors.go
create mode 100644 vendor/github.com/efficientgo/core/errors/stacktrace.go
rename vendor/github.com/efficientgo/{tools/core/pkg => core}/logerrcapture/do.go (94%)
rename vendor/github.com/efficientgo/{tools/core/pkg => core}/logerrcapture/doc.go (66%)
create mode 100644 vendor/github.com/efficientgo/core/merrors/doc.go
rename vendor/github.com/efficientgo/{tools/core/pkg/merrors/errors.go => core/merrors/merrors.go} (100%)
create mode 100644 vendor/github.com/efficientgo/core/testutil/doc.go
create mode 100644 vendor/github.com/efficientgo/core/testutil/internal/difflib.go
rename vendor/github.com/efficientgo/{tools/core/pkg => core}/testutil/testorbench.go (100%)
create mode 100644 vendor/github.com/efficientgo/core/testutil/testutil.go
delete mode 100644 vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go
delete mode 100644 vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go
delete mode 100644 vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go
create mode 100644 vendor/github.com/efficientgo/tools/extkingpin/LICENSE
create mode 100644 vendor/github.com/efficientgo/tools/extkingpin/doc.go
create mode 100644 vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go
delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS
create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go
delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go
delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go
delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go
delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go
create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh
rename vendor/github.com/fsnotify/fsnotify/{open_mode_bsd.go => system_bsd.go} (57%)
rename vendor/github.com/fsnotify/fsnotify/{open_mode_darwin.go => system_darwin.go} (52%)
delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/.gitignore
delete mode 100644 vendor/github.com/go-redis/redis/v8/.golangci.yml
delete mode 100644 vendor/github.com/go-redis/redis/v8/.prettierrc.yml
delete mode 100644 vendor/github.com/go-redis/redis/v8/CHANGELOG.md
delete mode 100644 vendor/github.com/go-redis/redis/v8/Makefile
delete mode 100644 vendor/github.com/go-redis/redis/v8/README.md
delete mode 100644 vendor/github.com/go-redis/redis/v8/RELEASING.md
delete mode 100644 vendor/github.com/go-redis/redis/v8/cluster.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/cluster_commands.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/command.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/commands.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/doc.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/error.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/arg.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/internal.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/log.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/once.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/safe.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/unsafe.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/util.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/util/safe.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/iterator.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/options.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/package.json
delete mode 100644 vendor/github.com/go-redis/redis/v8/pipeline.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/pubsub.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/redis.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/result.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/ring.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/script.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/sentinel.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/tx.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/universal.go
delete mode 100644 vendor/github.com/go-redis/redis/v8/version.go
delete mode 100644 vendor/github.com/golang-jwt/jwt/.travis.yml
delete mode 100644 vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
delete mode 100644 vendor/github.com/golang-jwt/jwt/claims.go
delete mode 100644 vendor/github.com/golang-jwt/jwt/errors.go
delete mode 100644 vendor/github.com/golang-jwt/jwt/token.go
rename vendor/github.com/golang-jwt/jwt/{ => v4}/.gitignore (100%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/LICENSE (100%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
rename vendor/github.com/golang-jwt/jwt/{ => v4}/README.md (61%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
rename vendor/github.com/golang-jwt/jwt/{ => v4}/VERSION_HISTORY.md (89%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/claims.go
rename vendor/github.com/golang-jwt/jwt/{ => v4}/doc.go (100%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/ecdsa.go (81%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/ecdsa_utils.go (81%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/errors.go
rename vendor/github.com/golang-jwt/jwt/{ => v4}/hmac.go (90%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/map_claims.go (50%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/none.go (94%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/parser.go (70%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/parser_option.go
rename vendor/github.com/golang-jwt/jwt/{ => v4}/rsa.go (92%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/rsa_pss.go (93%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/rsa_utils.go (72%)
rename vendor/github.com/golang-jwt/jwt/{ => v4}/signing_method.go (66%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/token.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v4/types.go
create mode 100644 vendor/github.com/googleapis/gax-go/v2/content_type.go
create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_inline_certificate.go
create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_routes.go
create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_status.go
create mode 100644 vendor/github.com/hashicorp/consul/api/operator_usage.go
create mode 100644 vendor/github.com/klauspost/compress/.gitattributes
create mode 100644 vendor/github.com/klauspost/compress/.gitignore
create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml
create mode 100644 vendor/github.com/klauspost/compress/README.md
create mode 100644 vendor/github.com/klauspost/compress/compressible.go
create mode 100644 vendor/github.com/klauspost/compress/fse/README.md
create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go
create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go
create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go
create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go
create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go
create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go
create mode 100644 vendor/github.com/klauspost/compress/gen.sh
create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore
create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md
create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go
create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go
create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go
create mode 100644 vendor/github.com/klauspost/compress/s2/dict.go
create mode 100644 vendor/github.com/klauspost/compress/s2/lz4convert.go
create mode 100644 vendor/github.com/klauspost/compress/s2/lz4sconvert.go
create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod
create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum
create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md
create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go
create mode 100644 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
create mode 100644 vendor/github.com/oklog/run/.gitignore
create mode 100644 vendor/github.com/oklog/run/LICENSE
create mode 100644 vendor/github.com/oklog/run/README.md
create mode 100644 vendor/github.com/oklog/run/actors.go
create mode 100644 vendor/github.com/oklog/run/group.go
create mode 100644 vendor/github.com/pkg/browser/browser_netbsd.go
delete mode 100644 vendor/github.com/pkg/browser/zbrowser_windows.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
create mode 100644 vendor/github.com/prometheus/common/model/value_float.go
create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go
create mode 100644 vendor/github.com/prometheus/common/model/value_type.go
create mode 100644 vendor/github.com/prometheus/common/version/info_default.go
create mode 100644 vendor/github.com/prometheus/common/version/info_go118.go
create mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css
create mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go
create mode 100644 vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go
create mode 100644 vendor/github.com/prometheus/procfs/thread.go
create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go
create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/generic.go
create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/histogram.go
create mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels_string.go
create mode 100644 vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
create mode 100644 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go
create mode 100644 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto
create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go
create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go
create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go
create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/test.txt
create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go
rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/checkpoint.go (87%)
rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/live_reader.go (99%)
rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/reader.go (99%)
rename vendor/github.com/prometheus/prometheus/tsdb/{wal => wlog}/watcher.go (87%)
rename vendor/github.com/prometheus/prometheus/tsdb/{wal/wal.go => wlog/wlog.go} (91%)
create mode 100644 vendor/github.com/rueian/rueidis/.gitignore
create mode 100644 vendor/github.com/rueian/rueidis/.goreleaser.yaml
create mode 100644 vendor/github.com/rueian/rueidis/LICENSE
create mode 100644 vendor/github.com/rueian/rueidis/NOTICE
create mode 100644 vendor/github.com/rueian/rueidis/README.md
create mode 100644 vendor/github.com/rueian/rueidis/binary.go
create mode 100644 vendor/github.com/rueian/rueidis/client.go
create mode 100644 vendor/github.com/rueian/rueidis/cluster.go
create mode 100644 vendor/github.com/rueian/rueidis/cmds.go
create mode 100644 vendor/github.com/rueian/rueidis/codecov.yml
create mode 100644 vendor/github.com/rueian/rueidis/docker-compose.yml
create mode 100644 vendor/github.com/rueian/rueidis/dockertest.sh
create mode 100644 vendor/github.com/rueian/rueidis/helper.go
create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/builder.go
create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/cmds.go
create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/gen.go
create mode 100644 vendor/github.com/rueian/rueidis/internal/cmds/slot.go
create mode 100644 vendor/github.com/rueian/rueidis/internal/util/parallel.go
create mode 100644 vendor/github.com/rueian/rueidis/lru.go
create mode 100644 vendor/github.com/rueian/rueidis/lua.go
create mode 100644 vendor/github.com/rueian/rueidis/message.go
create mode 100644 vendor/github.com/rueian/rueidis/mux.go
create mode 100644 vendor/github.com/rueian/rueidis/pipe.go
create mode 100644 vendor/github.com/rueian/rueidis/pool.go
create mode 100644 vendor/github.com/rueian/rueidis/pubsub.go
create mode 100644 vendor/github.com/rueian/rueidis/resp.go
create mode 100644 vendor/github.com/rueian/rueidis/ring.go
create mode 100644 vendor/github.com/rueian/rueidis/rueidis.go
create mode 100644 vendor/github.com/rueian/rueidis/sentinel.go
create mode 100644 vendor/github.com/rueian/rueidis/singleflight.go
create mode 100644 vendor/github.com/thanos-io/thanos/pkg/extkingpin/app.go
create mode 100644 vendor/github.com/thanos-io/thanos/pkg/extkingpin/flags.go
create mode 100644 vendor/github.com/thanos-io/thanos/pkg/extkingpin/path_content_reloader.go
create mode 100644 vendor/github.com/thanos-io/thanos/pkg/store/recover.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go
delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go
create mode 100644 vendor/github.com/weaveworks/common/httpgrpc/tools.go
create mode 100644 vendor/github.com/weaveworks/common/server/tls_config.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
create mode 100644 vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go
create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go
delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go
delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/config.go
create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go
delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go
delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.17.0}/doc.go (92%)
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.17.0}/exception.go (99%)
rename vendor/go.opentelemetry.io/otel/{metric/unit/unit.go => semconv/v1.17.0/http.go} (69%)
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.17.0}/schema.go (93%)
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
create mode 100644 vendor/go.uber.org/goleak/internal/stack/doc.go
create mode 100644 vendor/go4.org/intern/LICENSE
create mode 100644 vendor/go4.org/intern/README.md
create mode 100644 vendor/go4.org/intern/intern.go
create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE
create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/README.md
create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/assume-no-moving-gc.go
create mode 100644 vendor/go4.org/unsafe/assume-no-moving-gc/untested.go
rename vendor/{github.com/go-redis/redis/v8 => golang.org/x/exp}/LICENSE (83%)
create mode 100644 vendor/golang.org/x/exp/PATENTS
create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go
create mode 100644 vendor/golang.org/x/exp/slices/slices.go
create mode 100644 vendor/golang.org/x/exp/slices/sort.go
create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go
create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy.go
create mode 100644 vendor/golang.org/x/net/http2/hpack/static_table.go
create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/net/publicsuffix/data/children
create mode 100644 vendor/golang.org/x/net/publicsuffix/data/nodes
create mode 100644 vendor/golang.org/x/net/publicsuffix/data/text
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go
create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go
create mode 100644 vendor/golang.org/x/sys/cpu/parse.go
create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go
create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go
delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go
delete mode 100644 vendor/golang.org/x/text/AUTHORS
delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/text/cases/cases.go
create mode 100644 vendor/golang.org/x/text/cases/context.go
create mode 100644 vendor/golang.org/x/text/cases/fold.go
create mode 100644 vendor/golang.org/x/text/cases/icu.go
create mode 100644 vendor/golang.org/x/text/cases/info.go
create mode 100644 vendor/golang.org/x/text/cases/map.go
create mode 100644 vendor/golang.org/x/text/cases/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/cases/tables11.0.0.go
create mode 100644 vendor/golang.org/x/text/cases/tables12.0.0.go
create mode 100644 vendor/golang.org/x/text/cases/tables13.0.0.go
create mode 100644 vendor/golang.org/x/text/cases/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/cases/trieval.go
create mode 100644 vendor/golang.org/x/text/internal/internal.go
create mode 100644 vendor/golang.org/x/text/internal/language/common.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/compact.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/language.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/parents.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/tables.go
create mode 100644 vendor/golang.org/x/text/internal/language/compact/tags.go
create mode 100644 vendor/golang.org/x/text/internal/language/compose.go
create mode 100644 vendor/golang.org/x/text/internal/language/coverage.go
create mode 100644 vendor/golang.org/x/text/internal/language/language.go
create mode 100644 vendor/golang.org/x/text/internal/language/lookup.go
create mode 100644 vendor/golang.org/x/text/internal/language/match.go
create mode 100644 vendor/golang.org/x/text/internal/language/parse.go
create mode 100644 vendor/golang.org/x/text/internal/language/tables.go
create mode 100644 vendor/golang.org/x/text/internal/language/tags.go
create mode 100644 vendor/golang.org/x/text/internal/match.go
create mode 100644 vendor/golang.org/x/text/internal/tag/tag.go
create mode 100644 vendor/golang.org/x/text/language/coverage.go
create mode 100644 vendor/golang.org/x/text/language/doc.go
create mode 100644 vendor/golang.org/x/text/language/language.go
create mode 100644 vendor/golang.org/x/text/language/match.go
create mode 100644 vendor/golang.org/x/text/language/parse.go
create mode 100644 vendor/golang.org/x/text/language/tables.go
create mode 100644 vendor/golang.org/x/text/language/tags.go
create mode 100644 vendor/golang.org/x/time/rate/sometimes.go
delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/bexport.go (99%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/bimport.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/exportdata.go (100%)
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/iexport.go (78%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/iimport.go (86%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/newInterface10.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/newInterface11.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/support_go117.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/support_go118.go (62%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/unified_no.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/unified_yes.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/ureader_no.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/ureader_yes.go (71%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/codes.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/decoder.go (83%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/doc.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/encoder.go (95%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/flags.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/frames_go1.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/frames_go17.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/reloc.go (95%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/support.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/sync.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/syncmarker_string.go (100%)
create mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
create mode 100644 vendor/google.golang.org/api/internal/gensupport/error.go
delete mode 100644 vendor/google.golang.org/api/transport/http/configure_http2_go116.go
delete mode 100644 vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go

diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore
deleted file mode 100644
index cc7e53b46..000000000
--- a/vendor/cloud.google.com/go/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-# Editors
-.idea
-.vscode
-*.swp
-.history
-
-# Test files
-*.test
-coverage.txt
-
-# Other
-.DS_Store
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
deleted file mode 100644
index e33930b3c..000000000
--- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
+++ /dev/null
@@ -1,111 +0,0 @@
-{
-  "accessapproval": "1.3.0",
-  "accesscontextmanager": "1.2.0",
-  "aiplatform": "1.17.0",
"analytics": "0.9.0", - "apigateway": "1.2.0", - "apigeeconnect": "1.2.0", - "apigeeregistry": "0.2.0", - "apikeys": "0.1.0", - "appengine": "1.3.0", - "area120": "0.4.0", - "artifactregistry": "1.4.0", - "asset": "1.4.0", - "assuredworkloads": "1.2.0", - "automl": "1.4.0", - "baremetalsolution": "0.2.0", - "batch": "0.1.0", - "beyondcorp": "0.1.0", - "billing": "1.2.0", - "binaryauthorization": "1.0.0", - "certificatemanager": "0.2.1", - "channel": "1.7.0", - "cloudbuild": "1.2.0", - "clouddms": "1.2.0", - "cloudtasks": "1.4.0", - "compute": "1.9.0", - "contactcenterinsights": "1.2.3", - "container": "1.3.1", - "containeranalysis": "0.4.0", - "datacatalog": "1.3.1", - "dataflow": "0.5.1", - "dataform": "0.2.0", - "datafusion": "1.3.0", - "datalabeling": "0.3.0", - "dataplex": "1.1.0", - "dataproc": "1.5.0", - "dataqna": "0.4.0", - "datastream": "1.0.0", - "deploy": "1.2.1", - "dialogflow": "1.12.1", - "dlp": "1.4.0", - "documentai": "1.5.0", - "domains": "0.5.0", - "essentialcontacts": "1.2.0", - "eventarc": "1.6.0", - "filestore": "1.2.0", - "functions": "1.5.0", - "gaming": "1.3.1", - "gkebackup": "0.1.0", - "gkeconnect": "0.3.0", - "gkehub": "0.8.0", - "gkemulticloud": "0.2.0", - "grafeas": "0.2.0", - "gsuiteaddons": "1.2.0", - "iam": "0.3.0", - "iap": "1.3.0", - "ids": "1.0.0", - "iot": "1.2.0", - "kms": "1.4.0", - "language": "1.3.0", - "lifesciences": "0.4.0", - "managedidentities": "1.2.0", - "mediatranslation": "0.3.0", - "memcache": "1.3.0", - "metastore": "1.3.0", - "monitoring": "1.6.0", - "networkconnectivity": "1.2.0", - "networkmanagement": "1.3.0", - "networksecurity": "0.3.1", - "notebooks": "1.0.0", - "optimization": "1.0.0", - "orchestration": "1.2.0", - "orgpolicy": "1.3.0", - "osconfig": "1.6.0", - "oslogin": "1.3.0", - "phishingprotection": "0.4.0", - "policytroubleshooter": "1.2.0", - "privatecatalog": "0.4.0", - "recaptchaenterprise/v2": "2.0.1", - "recommendationengine": "0.3.0", - "recommender": "1.4.0", - "redis": "1.6.0", - "resourcemanager": "1.2.0", - "resourcesettings": "1.2.0", - "retail": "1.5.0", - "run": "0.1.1", - "scheduler": "1.3.0", - "secretmanager": "1.5.0", - "security": "1.4.1", - "securitycenter": "1.10.0", - "servicecontrol": "1.3.0", - "servicedirectory": "1.3.0", - "servicemanagement": "1.3.1", - "serviceusage": "1.2.0", - "shell": "1.2.0", - "speech": "1.5.0", - "storagetransfer": "1.3.0", - "talent": "1.0.0", - "texttospeech": "1.3.0", - "tpu": "1.2.0", - "trace": "1.2.0", - "translate": "1.2.0", - "video": "1.7.0", - "videointelligence": "1.4.0", - "vision/v2": "2.1.0", - "vmmigration": "1.1.0", - "vpcaccess": "1.2.0", - "webrisk": "1.3.0", - "websecurityscanner": "1.2.0", - "workflows": "1.5.0" -} diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json deleted file mode 100644 index d9c938906..000000000 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - ".": "0.104.0" -} diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md deleted file mode 100644 index 8d00d2e95..000000000 --- a/vendor/cloud.google.com/go/CHANGES.md +++ /dev/null @@ -1,2424 +0,0 @@ -# Changes - -## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) - - -### Features - -* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) 
([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) - -## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) - - -### Features - -* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) - -## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) - - -### Bug Fixes - -* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35)) - -## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24) - - -### Features - -* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187)) - -### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03) - - -### Bug Fixes - -* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b)) - -## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20) - - -### Features - -* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682)) -* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) - - -### Bug Fixes - -* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095)) -* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b)) -* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672)) -* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8)) -* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059)) - -## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04) - - -### Features - -* **analytics/admin:** add the `AcknowledgeUserDataCollection` operation which acknowledges the terms of 
user data collection for the specified property feat: add the new resource type `DataStream`, which is planned to eventually replace `WebDataStream`, `IosAppDataStream`, `AndroidAppDataStream` resources fix!: remove `GetEnhancedMeasurementSettings`, `UpdateEnhancedMeasurementSettingsRequest`, `UpdateEnhancedMeasurementSettingsRequest` operations from the API feat: add `CreateDataStream`, `DeleteDataStream`, `UpdateDataStream`, `ListDataStreams` operations to support the new `DataStream` resource feat: add `DISPLAY_VIDEO_360_ADVERTISER_LINK`, `DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL` fields to `ChangeHistoryResourceType` enum feat: add the `account` field to the `Property` type docs: update the documentation with a new list of valid values for `UserLink.direct_roles` field ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **assuredworkloads:** EU Regions and Support With Sovereign Controls ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5)) -* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5)) -* **dialogflow:** added export documentation method feat: added filter in list documentations request feat: added option to import custom metadata from Google Cloud Storage in reload document request feat: added option to apply partial update to the smart messaging allowlist in reload document request feat: added filter in list knowledge bases request ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **dialogflow:** removed OPTIONAL for speech model variant docs: added more docs for speech model variant and improved docs format for participant ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **recaptchaenterprise:** add new reCAPTCHA Enterprise fraud annotations ([3dd34a2](https://www.github.com/googleapis/google-cloud-go/commit/3dd34a262edbff63b9aece8faddc2ff0d98ce42a)) - - -### Bug Fixes - -* **artifactregistry:** fix resource pattern ID segment name ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **compute:** add parameter in compute bazel rules ([#692](https://www.github.com/googleapis/google-cloud-go/issues/692)) ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) -* **profiler:** refine regular expression for parsing backoff duration in E2E tests ([#5229](https://www.github.com/googleapis/google-cloud-go/issues/5229)) ([4438aeb](https://www.github.com/googleapis/google-cloud-go/commit/4438aebca2ec01d4dbf22287aa651937a381e043)) -* **profiler:** remove certificate expiration workaround ([#5222](https://www.github.com/googleapis/google-cloud-go/issues/5222)) ([2da36c9](https://www.github.com/googleapis/google-cloud-go/commit/2da36c95f44d5f88fd93cd949ab78823cea74fe7)) - -## [0.99.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.98.0...v0.99.0) (2021-12-06) - - -### Features - -* **dialogflow/cx:** added `TelephonyTransferCall` in response message 
([fe27098](https://www.github.com/googleapis/google-cloud-go/commit/fe27098e5d429911428821ded57384353e699774)) - -## [0.98.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.97.0...v0.98.0) (2021-12-03) - - -### Features - -* **aiplatform:** add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **aiplatform:** add featurestore service to aiplatform v1 feat: add metadata service to aiplatform v1 ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) -* **aiplatform:** Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8)) -* **aiplatform:** Tensorboard v1 protos release feat:Exposing a field for v1 CustomJob-Tensorboard integration. ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **binaryauthorization:** add new admission rule types to Policy feat: update SignatureAlgorithm enum to match algorithm names in KMS feat: add SystemPolicyV1Beta1 service ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **channel:** add resource type to ChannelPartnerLink ([c206948](https://www.github.com/googleapis/google-cloud-go/commit/c2069487f6af5bcb37d519afeb60e312e35e67d5)) -* **cloudtasks:** add C++ rules for Cloud Tasks ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **compute:** Move compute.v1 from googleapis-discovery to googleapis ([#675](https://www.github.com/googleapis/google-cloud-go/issues/675)) ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **compute:** Switch to string enums for compute ([#685](https://www.github.com/googleapis/google-cloud-go/issues/685)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **contactcenterinsights:** Add ability to update phrase matchers feat: Add issue model stats to time series feat: Add display name to issue model stats ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **contactcenterinsights:** Add WriteDisposition to BigQuery Export API ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **contactcenterinsights:** deprecate issue_matches docs: if conversation medium is unspecified, it will default to PHONE_CALL ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) -* **contactcenterinsights:** new feature flag disable_issue_modeling docs: fixed formatting issues in the reference documentation 
([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **contactcenterinsights:** remove feature flag disable_issue_modeling ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **datacatalog:** Added BigQueryDateShardedSpec.latest_shard_resource field feat: Added SearchCatalogResult.display_name field feat: Added SearchCatalogResult.description field ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **dataproc:** add Dataproc Serverless for Spark Batches API ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) -* **dataproc:** Add support for dataproc BatchController service ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6)) -* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) -* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) -* **dialogflow/cx:** added support for comparing between versions docs: clarified security settings API reference ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) -* **dialogflow/cx:** added support for Deployments with ListDeployments and GetDeployment apis feat: added support for DeployFlow api under Environments feat: added support for TestCasesConfig under Environment docs: added long running operation explanation for several apis fix!: marked resource name of security setting as not-required ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0)) -* **dialogflow/cx:** allow setting custom CA for generic webhooks and release CompareVersions API docs: clarify DLP template reader usage ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **dialogflow:** added support to configure security settings, language code and time zone on conversation profile ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **dialogflow:** support document metadata filter in article suggestion and smart reply model in human agent assistant ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a)) -* **dlp:** added deidentify replacement dictionaries feat: added field for BigQuery inspect template inclusion lists feat: added field to support infotype versioning ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **domains:** added library for Cloud Domains v1 API. 
Also added methods for the transfer-in flow docs: improved API comments ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6)) -* **functions:** Secret Manager integration fields 'secret_environment_variables' and 'secret_volumes' added feat: CMEK integration fields 'kms_key_name' and 'docker_repository' added ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **kms:** add OAEP+SHA1 to the list of supported algorithms ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0)) -* **kms:** add RPC retry information for MacSign, MacVerify, and GenerateRandomBytes Committer: [@bdhess](https://www.github.com/bdhess) ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) -* **kms:** add support for Raw PKCS[#1](https://www.github.com/googleapis/google-cloud-go/issues/1) signing keys ([58bea89](https://www.github.com/googleapis/google-cloud-go/commit/58bea89a3d177d5c431ff19310794e3296253353)) -* **monitoring/apiv3:** add CreateServiceTimeSeries RPC ([9e41088](https://www.github.com/googleapis/google-cloud-go/commit/9e41088bb395fbae0e757738277d5c95fa2749c8)) -* **monitoring/dashboard:** Added support for auto-close configurations ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **monitoring/metricsscope:** promote apiv1 to GA ([#5135](https://www.github.com/googleapis/google-cloud-go/issues/5135)) ([33c0f63](https://www.github.com/googleapis/google-cloud-go/commit/33c0f63e0e0ce69d9ef6e57b04d1b8cc10ed2b78)) -* **osconfig:** OSConfig: add OS policy assignment rpcs ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) -* **osconfig:** Update OSConfig API ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a)) -* **osconfig:** Update osconfig v1 and v1alpha RecurringSchedule.Frequency with DAILY frequency ([59e548a](https://www.github.com/googleapis/google-cloud-go/commit/59e548acc249c7bddd9c884c2af35d582a408c4d)) -* **recaptchaenterprise:** add reCAPTCHA Enterprise account defender API methods ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8)) -* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **security/privateca:** add IAMPolicy & Locations mix-in support ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) -* **securitycenter:** Added a new API method UpdateExternalSystem, which enables updating a finding w/ external system metadata. 
External systems are a child resource under finding, and are housed on the finding itself, and can also be filtered on in Notifications, the ListFindings and GroupFindings API ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **securitycenter:** Added mute related APIs, proto messages and fields ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897)) -* **securitycenter:** Added resource type and display_name field to the FindingResult, and supported them in the filter for ListFindings and GroupFindings. Also added display_name to the resource which is surfaced in NotificationMessage ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) -* **securitycenter:** Added vulnerability field to the finding feat: Added type field to the resource which is surfaced in NotificationMessage ([090cc3a](https://www.github.com/googleapis/google-cloud-go/commit/090cc3ae0f8747a14cc904fc6d429e2f5379bb03)) -* **servicecontrol:** add C++ rules for many Cloud services ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **speech:** add result_end_time to SpeechRecognitionResult ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **speech:** added alternative_language_codes to RecognitionConfig feat: WEBM_OPUS codec feat: SpeechAdaptation configuration feat: word confidence feat: spoken punctuation and spoken emojis feat: hint boost in SpeechContext ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) -* **texttospeech:** update v1 proto ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) -* **workflows/executions:** add a stack_trace field to the Error messages specifying where the error occurred feat: add call_log_level field to Execution messages doc: clarify requirement to escape strings within JSON arguments ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) - - -### Bug Fixes - -* **accesscontextmanager:** nodejs package name access-context-manager ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) -* **aiplatform:** Remove invalid resource annotations ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) -* **compute/metadata:** return an error when all retries have failed ([#5063](https://www.github.com/googleapis/google-cloud-go/issues/5063)) ([c792a0d](https://www.github.com/googleapis/google-cloud-go/commit/c792a0d13db019c9964efeee5c6bc85b07ca50fa)), refs [#5062](https://www.github.com/googleapis/google-cloud-go/issues/5062) -* **compute:** make parent_id fields required compute move and insert methods ([#686](https://www.github.com/googleapis/google-cloud-go/issues/686)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) -* **compute:** Move compute_small protos under its own directory ([#681](https://www.github.com/googleapis/google-cloud-go/issues/681)) ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897)) -* **internal/gapicgen:** fix a compute filtering
([#5111](https://www.github.com/googleapis/google-cloud-go/issues/5111)) ([77aa19d](https://www.github.com/googleapis/google-cloud-go/commit/77aa19de7fc33a9e831e6b91bd324d6832b44d99)) -* **internal/godocfx:** only put TOC status on mod if all pkgs have same status ([#4974](https://www.github.com/googleapis/google-cloud-go/issues/4974)) ([309b59e](https://www.github.com/googleapis/google-cloud-go/commit/309b59e583d1bf0dd9ffe84223034eb8a2975d47)) -* **internal/godocfx:** replace * with HTML code ([#5049](https://www.github.com/googleapis/google-cloud-go/issues/5049)) ([a8f7c06](https://www.github.com/googleapis/google-cloud-go/commit/a8f7c066e8d97120ae4e12963e3c9acc8b8906c2)) -* **monitoring/apiv3:** Reintroduce deprecated field/enum for backward compatibility docs: Use absolute link targets in comments ([45fd259](https://www.github.com/googleapis/google-cloud-go/commit/45fd2594d99ef70c776df26866f0a3b537e7e69e)) -* **profiler:** workaround certificate expiration issue in integration tests ([#4955](https://www.github.com/googleapis/google-cloud-go/issues/4955)) ([de9e465](https://www.github.com/googleapis/google-cloud-go/commit/de9e465bea8cd0580c45e87d2cbc2b610615b363)) -* **security/privateca:** include mixin protos as input for mixin rpcs ([479c2f9](https://www.github.com/googleapis/google-cloud-go/commit/479c2f90d556a106b25ebcdb1539d231488182da)) -* **security/privateca:** repair service config to enable mixins ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) -* **video/transcoder:** update nodejs package name to video-transcoder ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) - -## [0.97.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.96.0...v0.97.0) (2021-09-29) - - -### Features - -* **internal** add Retry func to testutil from samples repository [#4902](https://github.com/googleapis/google-cloud-go/pull/4902) - -## [0.96.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.95.0...v0.96.0) (2021-09-28) - - -### Features - -* **civil:** add IsEmpty function to time, date and datetime ([#4728](https://www.github.com/googleapis/google-cloud-go/issues/4728)) ([88bfa64](https://www.github.com/googleapis/google-cloud-go/commit/88bfa64d6df2f3bb7d41e0b8f56717dd3de790e2)), refs [#4727](https://www.github.com/googleapis/google-cloud-go/issues/4727) -* **internal/godocfx:** detect preview versions ([#4899](https://www.github.com/googleapis/google-cloud-go/issues/4899)) ([9b60844](https://www.github.com/googleapis/google-cloud-go/commit/9b608445ce9ebabbc87a50e85ce6ef89125031d2)) -* **internal:** provide wrapping for retried errors ([#4797](https://www.github.com/googleapis/google-cloud-go/issues/4797)) ([ce5f4db](https://www.github.com/googleapis/google-cloud-go/commit/ce5f4dbab884e847a2d9f1f8f3fcfd7df19a505a)) - - -### Bug Fixes - -* **internal/gapicgen:** restore fmting proto files ([#4789](https://www.github.com/googleapis/google-cloud-go/issues/4789)) ([5606b54](https://www.github.com/googleapis/google-cloud-go/commit/5606b54b97bb675487c6c138a4081c827218f933)) -* **internal/trace:** use xerrors.As for trace ([#4813](https://www.github.com/googleapis/google-cloud-go/issues/4813)) ([05fe61c](https://www.github.com/googleapis/google-cloud-go/commit/05fe61c5aa4860bdebbbe3e91a9afaba16aa6184)) - -## [0.95.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.1...v0.95.0) (2021-09-21) - -### Bug Fixes - -* **internal/gapicgen:** add a 
temporary import ([#4756](https://www.github.com/googleapis/google-cloud-go/issues/4756)) ([4d9c046](https://www.github.com/googleapis/google-cloud-go/commit/4d9c046b66a2dc205e2c14b676995771301440da)) -* **compute/metadata:** remove heavy gax dependency ([#4784](https://www.github.com/googleapis/google-cloud-go/issues/4784)) ([ea00264](https://www.github.com/googleapis/google-cloud-go/commit/ea00264428137471805f2ec67f04f3a5a42928fa)) - -### [0.94.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.0...v0.94.1) (2021-09-02) - - -### Bug Fixes - -* **compute/metadata:** fix retry logic to not panic on error ([#4714](https://www.github.com/googleapis/google-cloud-go/issues/4714)) ([75c63b9](https://www.github.com/googleapis/google-cloud-go/commit/75c63b94d2cf86606fffc3611f7e6150b667eedc)), refs [#4713](https://www.github.com/googleapis/google-cloud-go/issues/4713) - -## [0.94.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.92.0...v0.94.0) (2021-08-31) - - -### Features - -* **aiplatform:** add XAI, model monitoring, and index services to aiplatform v1 ([e385b40](https://www.github.com/googleapis/google-cloud-go/commit/e385b40a1e2ecf81f5fd0910de5c37275951f86b)) -* **analytics/admin:** add `GetDataRetentionSettings`, `UpdateDataRetentionSettings` methods to the API ([8467899](https://www.github.com/googleapis/google-cloud-go/commit/8467899ab6ebf0328c543bfb5fbcddeb2f53a082)) -* **asset:** Release of relationships in v1, Add content type Relationship to support relationship export Committer: lvv@ ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **assuredworkloads:** Add Canada Regions And Support compliance regime ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **cloudbuild/apiv1:** Add ability to configure BuildTriggers to create Builds that require approval before executing and ApproveBuild API to approve or reject pending Builds ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **cloudbuild/apiv1:** add script field to BuildStep message ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **cloudbuild/apiv1:** Update cloudbuild proto with the service_account for BYOSA Triggers. 
([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **compute/metadata:** retry error when talking to metadata service ([#4648](https://www.github.com/googleapis/google-cloud-go/issues/4648)) ([81c6039](https://www.github.com/googleapis/google-cloud-go/commit/81c6039503121f8da3de4f4cd957b8488a3ef620)), refs [#4642](https://www.github.com/googleapis/google-cloud-go/issues/4642) -* **dataproc:** remove apiv1beta2 client ([#4682](https://www.github.com/googleapis/google-cloud-go/issues/4682)) ([2248554](https://www.github.com/googleapis/google-cloud-go/commit/22485541affb1251604df292670a20e794111d3e)) -* **gaming:** support version reporting API ([cd65cec](https://www.github.com/googleapis/google-cloud-go/commit/cd65cecf15c4a01648da7f8f4f4d497772961510)) -* **gkehub:** Add request_id under `DeleteMembershipRequest` and `UpdateMembershipRequest` ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **internal/carver:** support carving batches ([#4623](https://www.github.com/googleapis/google-cloud-go/issues/4623)) ([2972d19](https://www.github.com/googleapis/google-cloud-go/commit/2972d194da19bedf16d76fda471c06a965cfdcd6)) -* **kms:** add support for Key Reimport ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) -* **metastore:** Added the Backup resource and Backup resource GetIamPolicy/SetIamPolicy to V1 feat: Added the RestoreService method to V1 ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **monitoring/dashboard:** Added support for logs-based alerts: https://cloud.google.com/logging/docs/alerting/log-based-alerts feat: Added support for user-defined labels on cloud monitoring's Service and ServiceLevelObjective objects fix!: mark required fields in QueryTimeSeriesRequest as required ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) -* **osconfig:** Update osconfig v1 and v1alpha with WindowsApplication ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) -* **speech:** Add transcript normalization ([b31646d](https://www.github.com/googleapis/google-cloud-go/commit/b31646d1e12037731df4b5c0ba9f60b6434d7b9b)) -* **talent:** Add new commute methods in Search APIs feat: Add new histogram type 'publish_time_in_day' feat: Support filtering by requisitionId in ListJobs API ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) -* **translate:** added v3 proto for online/batch document translation and updated v3beta1 proto for format conversion ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) - - -### Bug Fixes - -* **datastream:** Change a few resource pattern variables from camelCase to snake_case ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) - -## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16) - - -### Features - -* **all:** remove testing deps ([#4580](https://www.github.com/googleapis/google-cloud-go/issues/4580)) ([15c1eb9](https://www.github.com/googleapis/google-cloud-go/commit/15c1eb9730f0b514edb911161f9c59e8d790a5ec)), refs [#4061](https://www.github.com/googleapis/google-cloud-go/issues/4061)
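The compute/metadata entries above (retry-on-error in 0.94.0, the panic fix in 0.94.1, and return-error-after-retries in 0.99.0) all concern the package-level metadata-server helpers. A minimal sketch of how callers exercise them, assuming a GCE environment; the calls shown are the public cloud.google.com/go/compute/metadata API:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server; the retry changes above make
	// transient lookup failures surface as errors instead of panics.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", project)
}
```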
-* **internal/detect:** add helper to detect projectID from env ([#4582](https://www.github.com/googleapis/google-cloud-go/issues/4582)) ([cc65d94](https://www.github.com/googleapis/google-cloud-go/commit/cc65d945688ac446602bce6ef86a935714dfe2f8)), refs [#1294](https://www.github.com/googleapis/google-cloud-go/issues/1294) -* **spannertest:** Add validation of duplicated column names ([#4611](https://www.github.com/googleapis/google-cloud-go/issues/4611)) ([84f86a6](https://www.github.com/googleapis/google-cloud-go/commit/84f86a605c809ab36dd3cb4b3ab1df15a5302083)) - -## [0.91.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.90.0...v0.91.0) (2021-08-11) - - -### Features - -* **.github:** support dynamic submodule detection ([#4537](https://www.github.com/googleapis/google-cloud-go/issues/4537)) ([4374b90](https://www.github.com/googleapis/google-cloud-go/commit/4374b907e9f166da6bd23a8ef94399872b00afd6)) -* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3)) -* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3)) -* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list available locations of Dialogflow products ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list available locations of Dialogflow products docs: reorder some fields ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **dialogflow:** expose `Locations` service to get/list available locations of Dialogflow products; fixed some API annotations ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **kms:** add support for HMAC, Variable Key Destruction, and GenerateRandom ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **speech:** add total_billed_time response field ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) -* **video/transcoder:** Add video cropping feature feat: Add video padding feature feat: Add ttl_after_completion_days field to Job docs: Update proto documentation docs: Indicate v1beta1 deprecation ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) - - -### Bug Fixes - -* **functions:** Updating behavior of source_upload_url during Get/List function calls ([381a494](https://www.github.com/googleapis/google-cloud-go/commit/381a494c29da388977b0bdda2177058328cc4afe)) - -## [0.90.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.89.0...v0.90.0) (2021-08-03) - - -### ⚠
BREAKING CHANGES - -* **compute:** add pagination and an Operation wrapper (#4542) - -### Features - -* **compute:** add pagination and an Operation wrapper ([#4542](https://www.github.com/googleapis/google-cloud-go/issues/4542)) ([36f4649](https://www.github.com/googleapis/google-cloud-go/commit/36f46494111f6d16d103fb208d49616576dbf91e)) -* **internal/godocfx:** add status to packages and TOCs ([#4547](https://www.github.com/googleapis/google-cloud-go/issues/4547)) ([c6de69c](https://www.github.com/googleapis/google-cloud-go/commit/c6de69c710561bb2a40eff05417df4b9798c258a)) -* **internal/godocfx:** mark status of deprecated items ([#4525](https://www.github.com/googleapis/google-cloud-go/issues/4525)) ([d571c6f](https://www.github.com/googleapis/google-cloud-go/commit/d571c6f4337ec9c4807c230cd77f53b6e7db6437)) - - -### Bug Fixes - -* **internal/carver:** don't tag commits ([#4518](https://www.github.com/googleapis/google-cloud-go/issues/4518)) ([c355eb8](https://www.github.com/googleapis/google-cloud-go/commit/c355eb8ecb0bb1af0ccf55e6262ca8c0d5c7e352)) - -## [0.89.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.88.0...v0.89.0) (2021-07-29) - - -### Features - -* **assuredworkloads:** Add EU Regions And Support compliance regime ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **datacatalog:** Added support for BigQuery connections entries feat: Added support for BigQuery routines entries feat: Added usage_signal field feat: Added labels field feat: Added ReplaceTaxonomy in Policy Tag Manager Serialization API feat: Added support for public tag templates feat: Added support for rich text tags docs: Documentation improvements ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **datafusion:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97)) -* **iap:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97)) -* **internal/carver:** add tooling to help carve out sub-modules ([#4417](https://www.github.com/googleapis/google-cloud-go/issues/4417)) ([a7e28f2](https://www.github.com/googleapis/google-cloud-go/commit/a7e28f2557469562ae57e5174b41bdf8fce62b63)) -* **networkconnectivity:** Add files for Network Connectivity v1 API. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **retail:** Add restricted Retail Search features for Retail API v2. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **secretmanager:** In Secret Manager, users can now use filter to customize the output of ListSecrets/ListSecretVersions calls ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **securitycenter:** add finding_class and indicator fields in Finding ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **speech:** add total_billed_time response field. fix!: phrase_set_id is required field in CreatePhraseSetRequest. fix!: custom_class_id is required field in CreateCustomClassRequest. 
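The 0.90.0 breaking change above replaces raw operation protos on the generated compute client with an Operation wrapper. A sketch of the resulting call shape, assuming the cloud.google.com/go/compute/apiv1 surface of recent releases (method and field names not verified against this exact vendored snapshot):

```go
package main

import (
	"context"

	compute "cloud.google.com/go/compute/apiv1"
	computepb "cloud.google.com/go/compute/apiv1/computepb"
)

// stopInstance stops a VM and blocks until the operation finishes.
func stopInstance(ctx context.Context, project, zone, instance string) error {
	c, err := compute.NewInstancesRESTClient(ctx)
	if err != nil {
		return err
	}
	defer c.Close()

	// Mutating calls return an Operation wrapper rather than a raw proto.
	op, err := c.Stop(ctx, &computepb.StopInstanceRequest{
		Project:  project,
		Zone:     zone,
		Instance: instance,
	})
	if err != nil {
		return err
	}
	// Wait polls the long-running operation to completion.
	return op.Wait(ctx)
}
```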
([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **storagetransfer:** start generating apiv1 ([#4505](https://www.github.com/googleapis/google-cloud-go/issues/4505)) ([f2d531d](https://www.github.com/googleapis/google-cloud-go/commit/f2d531d2b519efa58e0f23a178bbebe675c203c3)) - - -### Bug Fixes - -* **internal/gapicgen:** exec Stdout already set ([#4509](https://www.github.com/googleapis/google-cloud-go/issues/4509)) ([41246e9](https://www.github.com/googleapis/google-cloud-go/commit/41246e900aaaea92a9f956e92956c40c86f4cb3a)) -* **internal/gapicgen:** tidy all after dep bump ([#4515](https://www.github.com/googleapis/google-cloud-go/issues/4515)) ([9401be5](https://www.github.com/googleapis/google-cloud-go/commit/9401be509c570c3c55694375065c84139e961857)), refs [#4434](https://www.github.com/googleapis/google-cloud-go/issues/4434) - -## [0.88.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.87.0...v0.88.0) (2021-07-22) - - -### ⚠ BREAKING CHANGES - -* **cloudbuild/apiv1:** Proto had prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported. - -### Features - -* **cloudbuild/apiv1:** add a WorkerPools API ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547)) -* **cloudbuild/apiv1:** Implementation of Build Failure Info: - Added message FailureInfo field ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547)) -* **osconfig/agentendpoint:** OSConfig AgentEndpoint: add basic os info to RegisterAgentRequest, add WindowsApplication type to Inventory ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c)) -* **resourcesettings:** Publish Cloud ResourceSettings v1 API ([43ad3cb](https://www.github.com/googleapis/google-cloud-go/commit/43ad3cb7be981fff9dc5dcf4510f1cd7bea99957)) - - -### Bug Fixes - -* **internal/godocfx:** set exit code, print cmd output, no go get ... ([#4445](https://www.github.com/googleapis/google-cloud-go/issues/4445)) ([cc70f77](https://www.github.com/googleapis/google-cloud-go/commit/cc70f77ac279a62e24e1b07f6e53fd126b7286b0)) -* **internal:** detect module for properly generating docs URLs ([#4460](https://www.github.com/googleapis/google-cloud-go/issues/4460)) ([1eaba8b](https://www.github.com/googleapis/google-cloud-go/commit/1eaba8bd694f7552a8e3e09b4f164de8b6ca23f0)), refs [#4447](https://www.github.com/googleapis/google-cloud-go/issues/4447) -* **kms:** Updating WORKSPACE files to use the newest version of the TypeScript generator.
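The 0.89.0 secretmanager entry above notes that ListSecrets/ListSecretVersions now accept a filter. A minimal sketch of passing one, assuming the stub import path of recent releases and an illustrative filter expression:

```go
package main

import (
	"context"
	"fmt"

	secretmanager "cloud.google.com/go/secretmanager/apiv1"
	"google.golang.org/api/iterator"
	secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb"
)

func listProdSecrets(ctx context.Context, project string) error {
	client, err := secretmanager.NewClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	// The Filter field narrows the listing server-side; "name:prod"
	// here is only an example expression.
	it := client.ListSecrets(ctx, &secretmanagerpb.ListSecretsRequest{
		Parent: "projects/" + project,
		Filter: "name:prod",
	})
	for {
		s, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(s.Name)
	}
}
```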
([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c)) - -## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13) - - -### Features - -* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) -* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567)) -* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) -* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) -* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) - - -### Bug Fixes - -* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167)) -* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391)) - - -### Miscellaneous Chores - -* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15)) - -## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01) - - -### Features - -* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5)) - -## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30) - - -### Features - -* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) -* **datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) -* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) -* **documentai:** update document.proto, add the processor management methods. 
([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) -* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) -* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) -* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00)) -* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5)) -* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6)) -* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) -* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) - - -### Bug Fixes - -* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260) -* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41)) -* **kms:** replace IAMPolicy mixin in service config. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) -* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) - -## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09) - - -### Features - -* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) -* **apigeeconnect:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) -* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b)) -* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2)) -* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) -* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse.
([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b)) -* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345)) -* **lifesciences:** start generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) -* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b)) - - -### Bug Fixes - -* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a)) -* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) - -## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02) - - -### Features - -* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) -* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda)) -* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232)) -* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) -* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834)) -* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) -* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098)) -* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) -* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) - -## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17) - - -### Features - -* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) -* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links.
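The cluster of "start generating apivN" entries above (tpu, lifesciences, essentialcontacts, gsuiteaddons, and so on) all produce clients with the same generated shape. A sketch of that uniform constructor/Close pattern, using the TPU module as an arbitrary example; the import path and constructor follow the convention of recent releases rather than anything verified in this snapshot:

```go
package main

import (
	"context"
	"log"

	tpu "cloud.google.com/go/tpu/apiv1"
)

func main() {
	ctx := context.Background()
	// The generated constructor picks up Application Default
	// Credentials; every generated module pairs it with Close to
	// release the underlying connections.
	c, err := tpu.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	// Service RPCs (node listing, etc.) hang off c from here.
}
```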
([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) -* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b)) -* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) -* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763)) -* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) -* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) -* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540)) -* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) -* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. 
([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c)) -* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4)) -* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c)) -* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481)) -* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38)) -* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808)) -* **metastore:** start generating apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310)) -* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f)) -* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) -* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) -* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) -* **speech:** Support for spoken punctuation and spoken emojis.
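The speech entries above (WebM Opus support, spoken punctuation and spoken emojis; the same fields also appear in the 0.98.0 notes) are all switches on RecognitionConfig. A sketch of a config exercising them, assuming the v1 stubs of recent releases; the sample rate and language code are placeholders:

```go
package main

import (
	speechpb "cloud.google.com/go/speech/apiv1/speechpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// webmOpusConfig builds a recognition config for WebM Opus audio with
// spoken punctuation and emoji rendering enabled. The boolean fields
// are proto wrapper types, hence wrapperspb.Bool.
func webmOpusConfig() *speechpb.RecognitionConfig {
	return &speechpb.RecognitionConfig{
		Encoding:                speechpb.RecognitionConfig_WEBM_OPUS,
		SampleRateHertz:         48000,
		LanguageCode:            "en-US",
		EnableSpokenPunctuation: wrapperspb.Bool(true),
		EnableSpokenEmojis:      wrapperspb.Bool(true),
	}
}
```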
([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) - - -### Bug Fixes - -* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) -* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e)) -* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25)) -* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5)) -* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34)) -* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037) -* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) - - -### Miscellaneous Chores - -* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7)) - -## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) - - -### Features - -* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) -* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4)) -* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) -* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. 
([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) -* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) - - -### Bug Fixes - -* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678)) - -## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23) - - -### ⚠ BREAKING CHANGES - -* **all:** This is a breaking change in dialogflow - -### Features - -* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) -* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) -* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) -* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) -* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634) -* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61)) -* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf)) -* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57)) -* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f)) -* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) -* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e)) -* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) -* **speech:** Support output transcript to GCS for LongRunningRecognize. 
([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) -* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee)) - - -### Miscellaneous Chores - -* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) - -## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10) - - -### Features - -* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f)) -* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) -* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) -* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441)) -* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25)) -* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e)) -* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548)) -* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650)) -* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4)) -* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) - - -### Bug Fixes - -* **analytics/admin:** add 
`https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) -* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) -* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495)) -* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80)) -* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef)) -* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c)) -* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e)) - -## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22) - - -### Features - -* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de)) -* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) -* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) -* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) -* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) -* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) -* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) - - -### Bug Fixes - -* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. 
([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) -* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) - -## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16) - - -### Features - -* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04)) -* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) -* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2)) -* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418)) -* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) -* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f)) -* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac)) - - -### Bug Fixes - -* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **errorreporting:** Update bazel builds for ER client libraries. 
([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) -* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd)) -* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) -* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) -* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) -* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363)) -* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) - -## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02) - - -### Features - -* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) -* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db)) -* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d)) -* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) -* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e)) -* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) -* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) -* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) -* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **appengine:** start generating apiv1 
([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7)) -* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3)) -* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) -* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf)) -* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5)) -* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7)) -* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) -* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) -* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102)) -* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149)) -* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b)) -* **internal/gapicgen:** change commit formatting 
to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e)) -* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af)) -* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279)) -* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359)) -* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f)) -* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531)) -* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) -* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8)) -* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) -* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60)) -* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) -* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) -* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a)) -* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db)) - - -### Bug Fixes - -* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f)) -* **internal/godocfx:** add TOC element for module root package 
([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a)) -* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71)) - -## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448) -* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965)) - - -### Bug Fixes - -* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a)) - -## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374) -* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84)) -* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba)) - - -### Bug Fixes - -* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5)) - -## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) 
[#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199) -* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff)) -* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be)) -* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999)) -* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a)) - - -### Bug Fixes - -* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0)) - -## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119) - - -### Bug Fixes - -* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44)) -* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a)) - - -## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044) -* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3)) -* **internal:** auto-run godocfx on new mods 
([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42)) -* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc)) -* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171)) -* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6)) -* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2)) -* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900)) - - -### Bug Fixes - -* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb)) -* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7)) - -## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19) - - -### Features - -* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025) -* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44)) -* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714)) - -## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14) - -This is an empty release that was created solely to aid in pubsublite's module -carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. 
- -## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14) - - -### Features - -* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045)) -* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958) -* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33)) -* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf)) -* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b)) -* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e)) -* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85)) - - -### Bug Fixes - -* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0)) - -## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935) - -## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) 
[#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864) -* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568)) -* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72)) -* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830)) -* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e)) -* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61)) - - -### Bug Fixes - -* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d)) -* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7)) -* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883) - -## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781) -* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93)) -* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521)) -* **cloudbuild:** Start generating apiv1/v3 
([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64)) -* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5)) -* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893)) -* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4)) -* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901)) -* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580)) - - -### Bug Fixes - -* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639)) -* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c)) -* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835) - - -### Reverts - -* **cloudbuild:** Revert "feat(cloudbuild): Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557)) - -## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27) - - -### Announcements - -The following changes will be included in an upcoming release and are not -included in this one. - -#### Default Deadlines - -By default, non-streaming methods, like Create or Get methods, will have a -default deadline applied to the context provided at call time, unless a context -deadline is already set. Streaming methods have no default deadline and will run -indefinitely, unless the context provided at call time contains a deadline. - -To opt out of this behavior, set the environment variable -`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to -initializing a client. This opt-out mechanism will be removed in a later -release, with a notice similar to this one ahead of its removal.
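-
-A minimal sketch of the opt-out described above, assuming the behavior ships
-as announced (`secretmanager` is an arbitrary generated client chosen only
-for illustration; any generated client behaves the same way):
-
-```go
-package main
-
-import (
-	"context"
-	"os"
-	"time"
-
-	secretmanager "cloud.google.com/go/secretmanager/apiv1"
-)
-
-func main() {
-	// Opt out of default deadlines for non-streaming calls. The variable
-	// must be set before any client is initialized.
-	os.Setenv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE", "true")
-
-	ctx := context.Background()
-	client, err := secretmanager.NewClient(ctx)
-	if err != nil {
-		// TODO: handle error.
-		return
-	}
-	defer client.Close()
-
-	// With or without the opt-out, a deadline set on the call context
-	// always takes precedence over any default deadline.
-	callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
-	defer cancel()
-	_ = callCtx
-}
-```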
- - -### Features - -* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764) - - -### Bug Fixes - -* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1)) -* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d)) - -## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18) - - -### Features - -* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706) -* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e)) - -## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05) - - -### Features - -* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04)) -* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c)) -* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b)) -* **gaming:** start generating apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601)) -* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32)) - -## v0.62.0 - -### Announcements - -- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was - merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606). - This fixed a broken API response for `DiagnoseCluster`. When polling on the - Long Running Operation (LRO), the API now returns - `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an - `error` before. - -### Changes - -- all: - - Updated all direct dependencies. - - Updated contributing guidelines to suggest allowing edits from maintainers. -- billing/budgets: - - Start generating client for apiv1beta1. -- functions: - - Start generating client for apiv1. -- notebooks: - - Start generating client for apiv1beta1.
-- profiler: - update proftest to support parsing floating-point backoff durations. - Fix the regexp used to parse backoff duration. -- Various updates to autogenerated clients. - -## v0.61.0 - -### Changes - -- all: - - Update all direct dependencies. -- dashboard: - - Start generating client for apiv1. -- policytroubleshooter: - - Start generating client for apiv1. -- profiler: - - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry` (a short sketch follows the v0.57.0 notes below). -- Various updates to autogenerated clients. - -## v0.60.0 - -### Changes - -- all: - - Refactored examples to reduce module dependencies. - - Update sub-modules to use cloud.google.com/go v0.59.0. -- internal: - - Start generating client for gaming apiv1beta. -- Various updates to autogenerated clients. - -## v0.59.0 - -### Announcements - -googleapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our -contributing process has changed a bit. We will now be conducting all code reviews on GitHub, which means we now accept -pull requests! If you have a version of the codebase previously checked out, you may wish to update your git remote to -point to GitHub. - -### Changes - -- all: - - Remove dependency on honnef.co/go/tools. - - Update our contributing instructions now that we use GitHub for reviews. - - Remove some un-inclusive terminology. -- compute/metadata: - - Pass cancelable context to DNS lookup. -- .github: - - Update issue/PR templates. -- internal: - - Bump several clients to GA. - - Fix GoDoc badge source. - - Several automation changes related to the move to GitHub. - - Start generating a client for asset v1p5beta1. -- Various updates to autogenerated clients. - -## v0.58.0 - -### Deprecation notice - -- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking - changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`. - -### Changes - -- all: - - The remaining uses of gtransport.Dial have been removed. - - The `genproto` dependency has been updated to a version that makes use of - the new `protoreflect` library. For more information on these protobuf changes, - please see the following post from the official Go blog: - https://blog.golang.org/protobuf-apiv2. -- internal: - - Started generation of datastore admin v1 client. - - Updated protobuf version used for generation to 3.12.X. - - Update the release levels for several APIs. - - Generate clients with protoc-gen-go@v1.4.1. -- monitoring: - - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation - notice above). -- profiler: - - Fixed flakiness in tests. -- Various updates to autogenerated clients. - -## v0.57.0 - -- all: - - Update module dependency `google.golang.org/api` to `v0.21.0`. -- errorreporting: - - Add exported SetGoogleClientInfo wrappers to manual file. -- expr/v1alpha1: - - Deprecate client. This client will be removed in a future release. -- internal: - - Fix possible data race in TestTracer. - - Pin versions of tools used for generation. - - Correct the release levels for BigQuery APIs. - - Start generation of osconfig v1. -- longrunning: - - Add exported SetGoogleClientInfo wrappers to manual file. -- monitoring: - - Stop generation of monitoring/apiv3 because of incoming breaking change. -- trace: - - Add exported SetGoogleClientInfo wrappers to manual file. -- Various updates to autogenerated clients.
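-
-A minimal sketch of the v0.61.0 profiler toggle mentioned above, assuming only
-that `profiler.Config.EnableOCTelemetry` re-enables the now-disabled
-OpenCensus telemetry (the service name and version are hypothetical
-placeholders):
-
-```go
-package main
-
-import (
-	"log"
-
-	"cloud.google.com/go/profiler"
-)
-
-func main() {
-	cfg := profiler.Config{
-		Service:           "my-service", // hypothetical service name
-		ServiceVersion:    "1.0.0",      // hypothetical version
-		EnableOCTelemetry: true,         // opt back in to OpenCensus telemetry
-	}
-	if err := profiler.Start(cfg); err != nil {
-		log.Fatalf("profiler.Start: %v", err)
-	}
-}
-```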
- -## v0.56.0 - -- secretmanager: - - add IAM helper -- profiler: - - try all us-west1 zones for integration tests -- internal: - - add config to generate webrisk v1 - - add repo and commit to buildcop invocation - - add recaptchaenterprise v1 generation config - - update microgenerator to v0.12.5 - - add datacatalog client - - start generating security center settings v1beta - - start generating osconfig agentendpoint v1 - - set up generation for bigquery/connection/v1beta1 -- all: - - increase continuous testing timeout to 45m - - various updates to autogenerated clients. - -## v0.55.0 - -- Various updates to autogenerated clients. - -## v0.54.0 - -- all: - - remove unused golang.org/x/exp from mod file - - update godoc.org links to pkg.go.dev -- compute/metadata: - - use defaultClient when http.Client is nil - - remove subscribeClient -- iam: - - add support for v3 policy and IAM conditions -- Various updates to autogenerated clients. - -## v0.53.0 - -- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers). - - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API. -- profiler: remove symbolization (drops support for go1.10) -- Various updates to autogenerated clients. - -## v0.52.0 - -- internal/gapicgen: multiple improvements related to library generation. -- compute/metadata: unset ResponseHeaderTimeout in defaultClient -- docs: fix link to KMS in README.md -- Various updates to autogenerated clients. - -## v0.51.0 - -- secretmanager: - - add IAM helper for generic resource IAM handle -- cloudbuild: - - migrate to microgen in a major version -- Various updates to autogenerated clients. - -## v0.50.0 - -- profiler: - - Support disabling CPU profile collection. - - Log when a profile creation attempt begins. -- compute/metadata: - - Fix panic on malformed URLs. - - InstanceName returns actual instance name. -- Various updates to autogenerated clients. - -## v0.49.0 - -- functions/metadata: - - Handle string resources in JSON unmarshaller. -- Various updates to autogenerated clients. - -## v0.48.0 - -- Various updates to autogenerated clients. - -## v0.47.0 - -This release drops support for Go 1.9 and Go 1.10: we continue to officially -support Go 1.11, Go 1.12, and Go 1.13. - -- Various updates to autogenerated clients. -- Add cloudbuild/apiv1 client. - -## v0.46.3 - -This is an empty release that was created solely to aid in storage's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.46.2 - -This is an empty release that was created solely to aid in spanner's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.46.1 - -This is an empty release that was created solely to aid in firestore's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.46.0 - -- spanner: - - Retry "Session not found" for read-only transactions. - - Retry aborted PDMLs. -- spanner/spannertest: - - Fix a bug that was causing 0X-prefixed numbers to be parsed incorrectly. -- storage: - - Add HMACKeyOptions. - - Remove *REGIONAL from StorageClass documentation. MULTI_REGIONAL, - DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best-practice - StorageClasses, but they are still acceptable values. -- trace: - - Remove cloud.google.com/go/trace.
Package cloud.google.com/go/trace has been - marked OBSOLETE for several years: it is now no longer provided. If you - relied on this package, please vendor it or switch to using - https://cloud.google.com/trace/docs/setup/go (which obsoleted it). - -## v0.45.1 - -This is an empty release that was created solely to aid in pubsub's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.45.0 - -- compute/metadata: - - Add Email method. -- storage: - - Fix duplicated retry logic. - - Add ReaderObjectAttrs.StartOffset. - - Support reading last N bytes of a file when a negative range is given, such - as `obj.NewRangeReader(ctx, -10, -1)`. - - Add HMACKey listing functionality. -- spanner/spannertest: - - Support primary keys with no columns. - - Fix MinInt64 parsing. - - Implement deletion of key ranges. - - Handle reads during a read-write transaction. - - Handle returning DATE values. -- pubsub: - - Fix Ack/Modack request size calculation. -- logging: - - Add auto-detection of monitored resources on GAE Standard. - -## v0.44.3 - -This is an empty release that was created solely to aid in bigtable's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.44.2 - -This is an empty release that was created solely to aid in bigquery's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.44.1 - -This is an empty release that was created solely to aid in datastore's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.44.0 - -- datastore: - - Interface elements whose underlying types are supported are now supported. - - Reduce time to initial retry from 1s to 100ms. -- firestore: - - Add Increment transformation. -- storage: - - Allow emulator with STORAGE_EMULATOR_HOST. - - Add methods for HMAC key management. -- pubsub: - - Add PublishCount and PublishLatency measurements. - - Add DefaultPublishViews and DefaultSubscribeViews for convenience of - importing all views. - - Add Subscription.PushConfig.AuthenticationMethod. -- spanner: - - Allow emulator usage with SPANNER_EMULATOR_HOST. - - Add cloud.google.com/go/spanner/spannertest, a spanner emulator. - - Add cloud.google.com/go/spanner/spansql which contains types and a parser - for the Cloud Spanner SQL dialect. -- asset: - - Add apiv1p2beta1 client. - -## v0.43.0 - -This is an empty release that was created solely to aid in logging's module -carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. - -## v0.42.0 - -- bigtable: - - Add an admin method to update an instance and clusters. - - Fix bttest regex matching behavior for alternations (things like `|a`). - - Expose BlockAllFilter filter. -- bigquery: - - Add Routines API support. -- storage: - - Add read-only Bucket.LocationType. -- logging: - - Add TraceSampled to Entry. - - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context. -- pubsub: - - Add Cloud Key Management to TopicConfig. - - Change ExpirationPolicy to optional.Duration. -- automl: - - Add apiv1beta1 client. -- iam: - - Fix compilation problem with iam/credentials/apiv1. - -## v0.41.0 - -- bigtable: - - Check results from PredicateFilter in bttest, which fixes certain false matches.
-- profiler: - - debugLog checks user defined logging options before logging. -- spanner: - - PartitionedUpdates respect query parameters. - - StartInstance allows specifying cloud API access scopes. -- bigquery: - - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics. -- firestore: - - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll. -- replay: - - Change references to IPv4 addresses to localhost, making replay compatible with IPv6. - -## v0.40.0 - -- all: - - Update to protobuf-golang v1.3.1. -- datastore: - - Attempt to decode GAE-encoded keys if initial decoding attempt fails. - - Support integer time conversion. -- pubsub: - - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow, - this value should be adjusted higher. - - Use IPv6 compatible target in testutil. -- bigtable: - - Fix Latin-1 regexp filters in bttest, allowing \C. - - Expose PassAllFilter. -- profiler: - - Add log messages for slow path in start. - - Fix start to allow retry until success. -- firestore: - - Add admin client. -- containeranalysis: - - Add apiv1 client. -- grafeas: - - Add apiv1 client. - -## 0.39.0 - -- bigtable: - - Implement DeleteInstance in bttest. - - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest. -- bigquery: - - Move RequirePartitionFilter outside of TimePartioning. - - Expose models API. -- firestore: - - Allow array values in create and update calls. - - Add CollectionGroup method. -- pubsub: - - Add ExpirationPolicy to Subscription. -- storage: - - Add V4 signing. -- rpcreplay: - - Match streams by first sent request. This further improves rpcreplay's - ability to distinguish streams. -- httpreplay: - - Set up Man-In-The-Middle config only once. This should improve proxy - creation when multiple proxies are used in a single process. - - Remove error on empty Content-Type, allowing requests with no Content-Type - header but a non-empty body. -- all: - - Fix an edge case bug in auto-generated library pagination by properly - propagating pagetoken. - -## 0.38.0 - -This update includes a substantial reduction in our transitive dependency list -by way of updating to opencensus@v0.21.0. - -- spanner: - - Error implements GRPCStatus, allowing status.Convert. -- bigtable: - - Fix a bug in bttest that prevents single column queries returning results - that match other filters. - - Remove verbose retry logging. -- logging: - - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and - rune replace manually. -- recaptchaenterprise: - - Add v1beta1 client. -- phishingprotection: - - Add v1beta1 client. - -## 0.37.4 - -This patch releases re-builds the go.sum. This was not possible in the -previous release. - -- firestore: - - Add sentinel value DetectProjectID for auto-detecting project ID. - - Add OpenCensus tracing for public methods. - - Marked stable. All future changes come with a backwards compatibility - guarantee. - - Removed firestore/apiv1beta1. All users relying on this low-level library - should migrate to firestore/apiv1. Note that most users should use the - high-level firestore package instead. -- pubsub: - - Allow large messages in synchronous pull case. - - Cap bundler byte limit. This should prevent OOM conditions when there are - a very large number of message publishes occurring. -- storage: - - Add ETag to BucketAttrs and ObjectAttrs. -- datastore: - - Removed some non-sensical OpenCensus traces. 
-- webrisk: - - Add v1 client. -- asset: - - Add v1 client. -- cloudtasks: - - Add v2 client. - -## 0.37.3 - -This patch release removes github.com/golang/lint from the transitive -dependency list, resolving `go get -u` problems. - -Note: this release intentionally has a broken go.sum. Please use v0.37.4. - -## 0.37.2 - -This patch release is mostly intended to bring in v0.3.0 of -google.golang.org/api, which fixes a GCF deployment issue. - -Note: we had to-date accidentally marked Redis as stable. In this release, we've -fixed it by downgrading its documentation to alpha, as it is in other languages -and docs. - -- all: - - Document context in generated libraries. - -## 0.37.1 - -Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which -introduces a new oauth2 url. - -## 0.37.0 - -- spanner: - - Add BatchDML method. - - Reduced initial time between retries. -- bigquery: - - Produce better error messages for InferSchema. - - Add logical type control for avro loads. - - Add support for the GEOGRAPHY type. -- datastore: - - Add sentinel value DetectProjectID for auto-detecting project ID. - - Allow flatten tag on struct pointers. - - Fixed a bug that caused queries to panic with invalid queries. Instead they - will now return an error. -- profiler: - - Add ability to override GCE zone and instance. -- pubsub: - - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more - consistently retry specific error codes based on whether they're idempotent - or non-idempotent. -- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing - the Content-Length header to be dropped. -- iot: - - Add new apiv1 client. -- securitycenter: - - Add new apiv1 client. -- cloudscheduler: - - Add new apiv1 client. - -## 0.36.0 - -- spanner: - - Reduce minimum retry backoff from 1s to 100ms. This makes time between - retries much faster and should improve latency. -- storage: - - Add support for Bucket Policy Only. -- kms: - - Add ResourceIAM helper method. - - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM. -- firestore: - - Switch from v1beta1 API to v1 API. - - Allow emulator with FIRESTORE_EMULATOR_HOST. -- bigquery: - - Add NumLongTermBytes to Table. - - Add TotalBytesProcessedAccuracy to QueryStatistics. -- irm: - - Add new v1alpha2 client. -- talent: - - Add new v4beta1 client. -- rpcreplay: - - Fix connection to work with grpc >= 1.17. - - It is now required for an actual gRPC server to be running for Dial to - succeed. - -## 0.35.1 - -- spanner: - - Adds OpenCensus views back to public API. - -## v0.35.0 - -- all: - - Add go.mod and go.sum. - - Switch usage of gax-go to gax-go/v2. -- bigquery: - - Fix bug where time partitioning could not be removed from a table. - - Fix panic that occurred with empty query parameters. -- bttest: - - Fix bug where deleted rows were returned by ReadRows. -- bigtable/emulator: - - Configure max message size to 256 MiB. -- firestore: - - Allow non-transactional queries in transactions. - - Allow StartAt/EndBefore on direct children at any depth. - - QuerySnapshotIterator.Stop may be called in an error state. - - Fix bug the prevented reset of transaction write state in between retries. -- functions/metadata: - - Make Metadata.Resource a pointer. -- logging: - - Make SpanID available in logging.Entry. -- metadata: - - Wrap !200 error code in a typed err. -- profiler: - - Add function to check if function name is within a particular file in the - profile. - - Set parent field in create profile request. 
- - Return kubernetes client to start cluster, so client can be used to poll - cluster. - - Add function for checking if filename is in profile. -- pubsub: - - Fix bug where messages expired without an initial modack in - synchronous=true mode. - - Receive does not retry ResourceExhausted errors. -- spanner: - - client.Close now cancels existing requests and should be much faster for - large amounts of sessions. - - Correctly allow MinOpened sessions to be spun up. - -## v0.34.0 - -- functions/metadata: - - Switch to using JSON in context. - - Make Resource a value. -- vision: Fix ProductSearch return type. -- datastore: Add an example for how to handle MultiError. - -## v0.33.1 - -- compute: Removes an erroneously added go.mod. -- logging: Populate source location in fromLogEntry. - -## v0.33.0 - -- bttest: - - Add support for apply_label_transformer. -- expr: - - Add expr library. -- firestore: - - Support retrieval of missing documents. -- kms: - - Add IAM methods. -- pubsub: - - Clarify extension documentation. -- scheduler: - - Add v1beta1 client. -- vision: - - Add product search helper. - - Add new product search client. - -## v0.32.0 - -Note: This release is the last to support Go 1.6 and 1.8. - -- bigquery: - - Add support for removing an expiration. - - Ignore NeverExpire in Table.Create. - - Validate table expiration time. -- cbt: - - Add note about not supporting arbitrary bytes. -- datastore: - - Align key checks. -- firestore: - - Return an error when using Start/End without providing values. -- pubsub: - - Add pstest Close method. - - Clarify MaxExtension documentation. -- securitycenter: - - Add v1beta1 client. -- spanner: - - Allow nil in mutations. - - Improve doc of SessionPoolConfig.MaxOpened. - - Increase session deletion timeout from 5s to 15s. - -## v0.31.0 - -- bigtable: - - Group mutations across multiple requests. -- bigquery: - - Link to bigquery troubleshooting errors page in bigquery.Error comment. -- cbt: - - Fix go generate command. - - Document usage of both maxage + maxversions. -- datastore: - - Passing nil keys results in ErrInvalidKey. -- firestore: - - Clarify what Document.DataTo does with untouched struct fields. -- profile: - - Validate service name in agent. -- pubsub: - - Fix deadlock with pstest and ctx.Cancel. - - Fix a possible deadlock in pstest. -- trace: - - Update doc URL with new fragment. - -Special thanks to @fastest963 for going above and beyond helping us to debug -hard-to-reproduce Pub/Sub issues. - -## v0.30.0 - -- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information. -- bigtable: bttest supports row sample filter. -- functions: metadata package added for accessing Cloud Functions resource metadata. - -## v0.29.0 - -- bigtable: - - Add retry to all idempotent RPCs. - - cbt supports complex GC policies. - - Emulator supports arbitrary bytes in regex filters. -- firestore: Add ArrayUnion and ArrayRemove. -- logging: Add the ContextFunc option to supply the context used for - asynchronous RPCs. -- profiler: Ignore NotDefinedError when fetching the instance name -- pubsub: - - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled. - - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning. - - Fix deadlock. - - Restore Ack/Nack/Modacks metrics. - - Improve context handling in iterator. - - Implement synchronous mode for Receive. - - pstest: add Pull. -- spanner: Add a metric for the number of sessions currently opened. 
-- storage: - - Canceling the context releases all resources. - - Add additional RetentionPolicy attributes. -- vision/apiv1: Add LocalizeObjects method. - -## v0.28.0 - -- bigtable: - - Emulator returns Unimplemented for snapshot RPCs. -- bigquery: - - Support zero-length repeated, nested fields. -- cloud assets: - - Add v1beta client. -- datastore: - - Don't nil out transaction ID on retry. -- firestore: - - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next - returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator - (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots. - - Add array-contains operator. -- IAM: - - Add iam/credentials/apiv1 client. -- pubsub: - - Canceling the context passed to Subscription.Receive causes Receive to return when - processing finishes on all messages currently in progress, even if new messages are arriving. -- redis: - - Add redis/apiv1 client. -- storage: - - Add Reader.Attrs. - - Deprecate several Reader getter methods: please use Reader.Attrs for these instead. - - Add ObjectHandle.Bucket and ObjectHandle.Object methods. - -## v0.27.0 - -- bigquery: - - Allow modification of encryption configuration and partitioning options to a table via the Update call. - - Add a SchemaFromJSON function that converts a JSON table schema. -- bigtable: - - Restore cbt count functionality. -- containeranalysis: - - Add v1beta client. -- spanner: - - Fix a case where an iterator might not be closed correctly. -- storage: - - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount. - - Add a method to Reader that returns the parsed value of the Last-Modified header. - -## v0.26.0 - -- bigquery: - - Support filtering listed jobs by min/max creation time. - - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering). - - Include job creator email in Job struct. -- bigtable: - - Add `RowSampleFilter`. - - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters - must match the entire target string to succeed. Previously, the emulator was - succeeding on partial matches. - NOTE: As of this release, this change only affects the emulator when run - from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched - from `gcloud` will be updated in a subsequent `gcloud` release. -- dataproc: Add apiv1beta2 client. -- datastore: Save non-nil pointer fields on omitempty. -- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header. -- logging/logadmin: Support writer_identity and include_children. -- pubsub: - - Support labels on topics and subscriptions. - - Support message storage policy for topics. - - Use the distribution of ack times to determine when to extend ack deadlines. - The only user-visible effect of this change should be that programs that - call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub - Subscriber`. -- storage: - - Support predefined ACLs. - - Support additional ACL fields other than Entity and Role. - - Support bucket websites. - - Support bucket logging. - - -## v0.25.0 - -- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md) -- bigtable: - - cbt: Support a GC policy of "never". -- errorreporting: - - Support User. - - Close now calls Flush. - - Use OnError (previously ignored). 
-  - Pass through the RPC error as-is to OnError.
-- httpreplay: A tool for recording and replaying HTTP requests
-  (for the bigquery and storage clients in this repo).
-- kms: v1 client added.
-- logging: add SourceLocation to Entry.
-- storage: improve CRC checking on read.
-
-## v0.24.0
-
-- bigquery: Support for the NUMERIC type.
-- bigtable:
-  - cbt: Optionally specify columns for read/lookup.
-  - Support instance-level administration.
-- oslogin: New client for the OS Login API.
-- pubsub:
-  - The package is now stable. There will be no further breaking changes.
-  - Internal changes to improve Subscription.Receive behavior.
-- storage: Support updating bucket lifecycle config.
-- spanner: Support struct-typed parameter bindings.
-- texttospeech: New client for the Text-to-Speech API.
-
-## v0.23.0
-
-- bigquery: Add DDL stats to query statistics.
-- bigtable:
-  - cbt: Add cells-per-column limit for row lookup.
-  - cbt: Make it possible to combine read filters.
-- dlp: v2beta2 client removed. Use the v2 client instead.
-- firestore, spanner: Fix compilation errors due to protobuf changes.
-
-## v0.22.0
-
-- bigtable:
-  - cbt: Support cells per column limit for row read.
-  - bttest: Correctly handle empty RowSet.
-  - Fix ReadModifyWrite operation in emulator.
-  - Fix API path in GetCluster.
-
-- bigquery:
-  - BEHAVIOR CHANGE: Retry on 503 status code.
-  - Add dataset.DeleteWithContents.
-  - Add SchemaUpdateOptions for query jobs.
-  - Add Timeline to QueryStatistics.
-  - Add more stats to ExplainQueryStage.
-  - Support Parquet data format.
-
-- datastore:
-  - Support omitempty for times.
-
-- dlp:
-  - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
-    which is now out of beta.
-  - Add v2 client.
-
-- firestore:
-  - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
-
-- iam:
-  - Support JWT signing via SignJwt callopt.
-
-- profiler:
-  - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done.
-  - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
-  - Avoid returning empty serial port output.
-
-- pubsub:
-  - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy.
-  - BEHAVIOR CHANGE: Don't backoff on EOF.
-  - pstest: Support Acknowledge and ModifyAckDeadline RPCs.
-
-- redis:
-  - Add v1 beta Redis client.
-
-- spanner:
-  - Support SessionLabels.
-
-- speech:
-  - Add apiv1beta1 client.
-
-- storage:
-  - BEHAVIOR CHANGE: Retry reads when retryable error occurs.
-  - Fix delete of object in requester-pays bucket.
-  - Support KMS integration.
-
-## v0.21.0
-
-- bigquery:
-  - Add OpenCensus tracing.
-
-- firestore:
-  - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot
-    whose Exists method returns false. DocumentRef.Get and Transaction.Get
-    return the non-nil DocumentSnapshot in addition to a NotFound error.
-    **DocumentRef.GetAll and Transaction.GetAll return a non-nil
-    DocumentSnapshot instead of nil.**
-  - Add DocumentIterator.Stop. **Call Stop whenever you are done with a
-    DocumentIterator.**
-  - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime
-    notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen.
-  - Canceling an RPC now always returns a grpc.Status with codes.Canceled.
-
-- spanner:
-  - Add `CommitTimestamp`, which supports inserting the commit timestamp of a
-    transaction into a column.
-
-## v0.20.0
-
-- bigquery: Support SchemaUpdateOptions for load jobs.
-
-- bigtable:
-  - Add SampleRowKeys.
-  - cbt: Support union, intersection GCPolicy.
-  - Retry admin RPCs.
-  - Add trace spans to retries.
-
-- datastore: Add OpenCensus tracing.
-
-- firestore:
-  - Fix queries involving Null and NaN.
-  - Allow Timestamp protobufs for time values.
-
-- logging: Add a WriteTimeout option.
-
-- spanner: Support Batch API.
-
-- storage: Add OpenCensus tracing.
-
-## v0.19.0
-
-- bigquery:
-  - Support customer-managed encryption keys.
-
-- bigtable:
-  - Improved emulator support.
-  - Support GetCluster.
-
-- datastore:
-  - Add general mutations.
-  - Support pointer struct fields.
-  - Support transaction options.
-
-- firestore:
-  - Add Transaction.GetAll.
-  - Support document cursors.
-
-- logging:
-  - Support concurrent RPCs to the service.
-  - Support per-entry resources.
-
-- profiler:
-  - Add config options to disable heap and thread profiling.
-  - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.
-
-- pubsub:
-  - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
-    callback returns).
-  - Add SubscriptionInProject.
-  - Add OpenCensus instrumentation for streaming pull.
-
-- storage:
-  - Support CORS.
-
-## v0.18.0
-
-- bigquery:
-  - Marked stable.
-  - Schema inference of nullable fields supported.
-  - Added TimePartitioning to QueryConfig.
-
-- firestore: Data provided to DocumentRef.Set with a Merge option can contain
-  Delete sentinels.
-
-- logging: Clients can accept parent resources other than projects.
-
-- pubsub:
-  - pubsub/pstest: A lightweight fake for pubsub. Experimental; feedback welcome.
-  - Support updating more subscription metadata: AckDeadline,
-    RetainAckedMessages and RetentionDuration.
-
-- oslogin/apiv1beta: New client for the Cloud OS Login API.
-
-- rpcreplay: A package for recording and replaying gRPC traffic.
-
-- spanner:
-  - Add a ReadWithOptions that supports a row limit, as well as an index.
-  - Support query plan and execution statistics.
-  - Added [OpenCensus](http://opencensus.io) support.
-
-- storage: Clarify checksum validation for gzipped files (it is not validated
-  when the file is served uncompressed).
-
-
-## v0.17.0
-
-- firestore BREAKING CHANGES:
-  - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
-    Change
-    `docref.UpdateMap(ctx, map[string]interface{}{"a.b": 1})`
-    to
-    `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
-
-    Change
-    `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
-    to
-    `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
-  - Rename MergePaths to Merge; require args to be FieldPaths.
-  - A value stored as an integer can be read into a floating-point field, and vice versa.
-- bigtable/cmd/cbt:
-  - Support deleting a column.
-  - Add regex option for row read.
-- spanner: Mark stable.
-- storage:
-  - Add Reader.ContentEncoding method.
-  - Fix handling of SignedURL headers.
-- bigquery:
-  - If Uploader.Put is called with no rows, it returns nil without making a
-    call.
-  - Schema inference supports the "nullable" option in struct tags for
-    non-required fields.
-  - TimePartitioning supports "Field".
-
-
-## v0.16.0
-
-- Other bigquery changes:
-  - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
-  - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
-    Legacy SQL.
-  - Uploader.Put will generate a random insert ID if you do not provide one.
-  - Support time partitioning for load jobs.
-  - Support dry-run queries.
-  - A `Job` remembers its last retrieved status.
- - Support retrieving job configuration. - - Support labels for jobs and tables. - - Support dataset access lists. - - Improve support for external data sources, including data from Bigtable and - Google Sheets, and tables with external data. - - Support updating a table's view configuration. - - Fix uploading civil times with nanoseconds. - -- storage: - - Support PubSub notifications. - - Support Requester Pays buckets. - -- profiler: Support goroutine and mutex profile types. - -## v0.15.0 - -- firestore: beta release. See the - [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). - -- errorreporting: The existing package has been redesigned. - -- errors: This package has been removed. Use errorreporting. - - -## v0.14.0 - -- bigquery BREAKING CHANGES: - - Standard SQL is the default for queries and views. - - `Table.Create` takes `TableMetadata` as a second argument, instead of - options. - - `Dataset.Create` takes `DatasetMetadata` as a second argument. - - `DatasetMetadata` field `ID` renamed to `FullID` - - `TableMetadata` field `ID` renamed to `FullID` - -- Other bigquery changes: - - The client will append a random suffix to a provided job ID if you set - `AddJobIDSuffix` to true in a job config. - - Listing jobs is supported. - - Better retry logic. - -- vision, language, speech: clients are now stable - -- monitoring: client is now beta - -- profiler: - - Rename InstanceName to Instance, ZoneName to Zone - - Auto-detect service name and version on AppEngine. - -## v0.13.0 - -- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these - options to continue using Legacy SQL after the client switches its default - to Standard SQL. - -- bigquery: Support for updating dataset labels. - -- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other - than the client's. DatasetsInProject is no longer needed and is deprecated. - -- bigtable: Fail ListInstances when any zones fail. - -- spanner: support decoding of slices of basic types (e.g. []string, []int64, - etc.) - -- logging/logadmin: UpdateSink no longer creates a sink if it is missing - (actually a change to the underlying service, not the client) - -- profiler: Service and ServiceVersion replace Target in Config. - -## v0.12.0 - -- pubsub: Subscription.Receive now uses streaming pull. - -- pubsub: add Client.TopicInProject to access topics in a different project - than the client. - -- errors: renamed errorreporting. The errors package will be removed shortly. - -- datastore: improved retry behavior. - -- bigquery: support updates to dataset metadata, with etags. - -- bigquery: add etag support to Table.Update (BREAKING: etag argument added). - -- bigquery: generate all job IDs on the client. - -- storage: support bucket lifecycle configurations. - - -## v0.11.0 - -- Clients for spanner, pubsub and video are now in beta. - -- New client for DLP. - -- spanner: performance and testing improvements. - -- storage: requester-pays buckets are supported. - -- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. - -- pubsub: bug fixes and other minor improvements - -## v0.10.0 - -- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. - -- pubsub: Subscription.Receive now runs concurrently for higher throughput. - -- vision: cloud.google.com/go/vision is deprecated. Use -cloud.google.com/go/vision/apiv1 instead. - -- translation: now stable. - -- trace: several changes to the surface. See the link below. 
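-For readers migrating across these releases, the callback-based
-`Subscription.Receive` model referenced above looks roughly like the following
-minimal sketch (the project and subscription IDs are placeholders, not values
-from this changelog):
-
-```go
-package main
-
-import (
-    "context"
-    "log"
-
-    "cloud.google.com/go/pubsub"
-)
-
-func main() {
-    ctx := context.Background()
-    client, err := pubsub.NewClient(ctx, "project-id") // placeholder project ID
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer client.Close()
-
-    // Receive blocks, invoking the callback (possibly concurrently) for each
-    // message until ctx is canceled or an unrecoverable error occurs.
-    sub := client.Subscription("subscription-id") // placeholder subscription ID
-    err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
-        log.Printf("got message: %s", m.Data)
-        m.Ack()
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-}
-```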
-
-### Code changes required from v0.9.0
-
-- pubsub: Replace
-
-    ```
-    sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
-    ```
-
-  with
-
-    ```
-    sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
-        PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
-    })
-    ```
-
-- trace: trace.GRPCServerInterceptor will be provided from *trace.Client.
-Given an initialized `*trace.Client` named `tc`, instead of
-
-    ```
-    s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
-    ```
-
-  write
-
-    ```
-    s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
-    ```
-
-- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
-Instead of
-
-    ```
-    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
-    ```
-
-  write
-
-    ```
-    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
-    ```
-
-- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
-interceptor as a dial option as shown below when initializing Cloud package
-clients:
-
-    ```
-    c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
-    if err != nil {
-        ...
-    }
-    ```
-
-
-## v0.9.0
-
-- Breaking changes to some autogenerated clients.
-- rpcreplay package added.
-
-## v0.8.0
-
-- profiler package added.
-- storage:
-  - Retry Objects.Insert call.
-  - Add ProgressFunc to Writer.
-- pubsub: breaking changes:
-  - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
-  - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
-  - Message.Done replaced with Message.Ack and Message.Nack.
-
-## v0.7.0
-
-- Release of a client library for Spanner. See the
-[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
-Note that although the Spanner service is beta, the Go client library is alpha.
-
-## v0.6.0
-
-- Beta release of BigQuery, DataStore, Logging and Storage. See the
-[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
-
-- bigquery:
-  - struct support. Read a row directly into a struct with
-    `RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
-    You can also use field tags. See the [package documentation][cloud-bigquery-ref]
-    for details.
-
-  - The `ValueList` type was removed. It is no longer necessary. Instead of
-    ```go
-    var v ValueList
-    ... it.Next(&v) ...
-    ```
-    use
-
-    ```go
-    var v []Value
-    ... it.Next(&v) ...
-    ```
-
-  - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
-    `ValueList` would append to the slice. Now each call resets the size to zero first.
-
-  - Schema inference will infer the SQL type BYTES for a struct field of
-    type []byte. Previously it inferred STRING.
-
-  - The types `uint`, `uint64` and `uintptr` are no longer supported in schema
-    inference. BigQuery's integer type is INT64, and those types may hold values
-    that are not correctly represented in a 64-bit signed integer.
-
-## v0.5.0
-
-- bigquery:
-  - The SQL types DATE, TIME and DATETIME are now supported. They correspond to
-    the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
-    package.
-  - Support for query parameters.
-  - Support deleting a dataset.
-  - Values from INTEGER columns will now be returned as int64, not int. This
-    will avoid errors arising from large values on 32-bit systems.
-- datastore:
-  - Nested Go structs are now encoded as Entity values, instead of a
-    flattened list of the embedded struct's fields. This means that you may now
-    have twice-nested slices, e.g.
-    ```go
-    type State struct {
-      Cities []struct{
-        Populations []int
-      }
-    }
-    ```
-    See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
-    more details.
-  - Contexts no longer hold namespaces; instead you must set a key's namespace
-    explicitly. Also, key functions have been changed and renamed.
-  - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
-    ```go
-    q := datastore.NewQuery("Kind").Namespace("ns")
-    ```
-  - All the fields of Key are exported. That means you can construct any Key with a struct literal:
-    ```go
-    k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
-    ```
-  - As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.
-  - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
-    ```go
-    NewIncompleteKey(ctx, kind, parent)
-    ```
-    with
-    ```go
-    IncompleteKey(kind, parent)
-    ```
-    and if you do use namespaces, make sure you set the namespace on the returned key.
-  - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
-    ```go
-    NewKey(ctx, kind, name, 0, parent)
-    NewKey(ctx, kind, "", id, parent)
-    ```
-    with
-    ```go
-    NameKey(kind, name, parent)
-    IDKey(kind, id, parent)
-    ```
-    and if you do use namespaces, make sure you set the namespace on the returned key.
-  - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
-  - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
-  - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
-    more details.
-
-## v0.4.0
-
-- bigquery:
-  - `NewGCSReference` is now a function, not a method on `Client`.
-  - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
-    loading data into a table from a file or any `io.Reader`.
-  * Client.Table and Client.OpenTable have been removed.
-    Replace
-    ```go
-    client.OpenTable("project", "dataset", "table")
-    ```
-    with
-    ```go
-    client.DatasetInProject("project", "dataset").Table("table")
-    ```
-
-  * Client.CreateTable has been removed.
-    Replace
-    ```go
-    client.CreateTable(ctx, "project", "dataset", "table")
-    ```
-    with
-    ```go
-    client.DatasetInProject("project", "dataset").Table("table").Create(ctx)
-    ```
-
-  * Dataset.ListTables has been replaced with Dataset.Tables.
-    Replace
-    ```go
-    tables, err := ds.ListTables(ctx)
-    ```
-    with
-    ```go
-    it := ds.Tables(ctx)
-    for {
-      table, err := it.Next()
-      if err == iterator.Done {
-        break
-      }
-      if err != nil {
-        // TODO: Handle error.
-      }
-      // TODO: use table.
-    }
-    ```
-
-  * Client.Read has been replaced with Job.Read, Table.Read and Query.Read.
-    Replace
-    ```go
-    it, err := client.Read(ctx, job)
-    ```
-    with
-    ```go
-    it, err := job.Read(ctx)
-    ```
-    and similarly for reading from tables or queries.
-
-  * The iterator returned from the Read methods is now named RowIterator. Its
-    behavior is closer to the other iterators in these libraries. It no longer
-    supports the Schema method; see the next item.
-    Replace
-    ```go
-    for it.Next(ctx) {
-      var vals ValueList
-      if err := it.Get(&vals); err != nil {
-        // TODO: Handle error.
-      }
-      // TODO: use vals.
-    }
-    if err := it.Err(); err != nil {
-      // TODO: Handle error.
-    }
-    ```
-    with
-    ```go
-    for {
-      var vals ValueList
-      err := it.Next(&vals)
-      if err == iterator.Done {
-        break
-      }
-      if err != nil {
-        // TODO: Handle error.
-      }
-      // TODO: use vals.
-    }
-    ```
-    Instead of the `RecordsPerRequest(n)` option, write
-    ```go
-    it.PageInfo().MaxSize = n
-    ```
-    Instead of the `StartIndex(i)` option, write
-    ```go
-    it.StartIndex = i
-    ```
-
-  * ValueLoader.Load now takes a Schema in addition to a slice of Values.
-    Replace
-    ```go
-    func (vl *myValueLoader) Load(v []bigquery.Value)
-    ```
-    with
-    ```go
-    func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
-    ```
-
-  * Table.Patch is replaced by Table.Update.
-    Replace
-    ```go
-    p := table.Patch()
-    p.Description("new description")
-    metadata, err := p.Apply(ctx)
-    ```
-    with
-    ```go
-    metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
-      Description: "new description",
-    })
-    ```
-
-  * Client.Copy is replaced by separate methods for each of its four functions.
-    All options have been replaced by struct fields.
-
-    * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
-      Replace
-      ```go
-      client.Copy(ctx, table, gcsRef)
-      ```
-      with
-      ```go
-      table.LoaderFrom(gcsRef).Run(ctx)
-      ```
-      Instead of passing options to Copy, set fields on the Loader:
-      ```go
-      loader := table.LoaderFrom(gcsRef)
-      loader.WriteDisposition = bigquery.WriteTruncate
-      ```
-
-    * To extract data from a table into Google Cloud Storage, use
-      Table.ExtractorTo. Set fields on the returned Extractor instead of
-      passing options.
-      Replace
-      ```go
-      client.Copy(ctx, gcsRef, table)
-      ```
-      with
-      ```go
-      table.ExtractorTo(gcsRef).Run(ctx)
-      ```
-
-    * To copy data into a table from one or more other tables, use
-      Table.CopierFrom. Set fields on the returned Copier instead of passing options.
-      Replace
-      ```go
-      client.Copy(ctx, dstTable, srcTable)
-      ```
-      with
-      ```go
-      dstTable.CopierFrom(srcTable).Run(ctx)
-      ```
-
-    * To start a query job, create a Query and call its Run method. Set fields
-      on the query instead of passing options.
-      Replace
-      ```go
-      client.Copy(ctx, table, query)
-      ```
-      with
-      ```go
-      query.Run(ctx)
-      ```
-
-  * Table.NewUploader has been renamed to Table.Uploader. Instead of options,
-    configure an Uploader by setting its fields.
-    Replace
-    ```go
-    u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
-    ```
-    with
-    ```go
-    u := table.Uploader()
-    u.IgnoreUnknownValues = true
-    ```
-
-- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package
-`google.golang.org/api/iterator`.
-
-## v0.3.0
-
-- storage:
-  * AdminClient replaced by methods on Client.
-    Replace
-    ```go
-    adminClient.CreateBucket(ctx, bucketName, attrs)
-    ```
-    with
-    ```go
-    client.Bucket(bucketName).Create(ctx, projectID, attrs)
-    ```
-
-  * BucketHandle.List replaced by BucketHandle.Objects.
-    Replace
-    ```go
-    for query != nil {
-      objs, err := bucket.List(d.ctx, query)
-      if err != nil { ... }
-      query = objs.Next
-      for _, obj := range objs.Results {
-        fmt.Println(obj)
-      }
-    }
-    ```
-    with
-    ```go
-    iter := bucket.Objects(d.ctx, query)
-    for {
-      obj, err := iter.Next()
-      if err == iterator.Done {
-        break
-      }
-      if err != nil { ... }
-      fmt.Println(obj)
-    }
-    ```
-    (The `iterator` package is at `google.golang.org/api/iterator`.)
-
-    Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
-
-    Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
-
-  * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
-    Replace
-    ```go
-    attrs, err := src.CopyTo(ctx, dst, nil)
-    ```
-    with
-    ```go
-    attrs, err := dst.CopierFrom(src).Run(ctx)
-    ```
-
-    Replace
-    ```go
-    attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
-    ```
-    with
-    ```go
-    c := dst.CopierFrom(src)
-    c.ContentType = "text/html"
-    attrs, err := c.Run(ctx)
-    ```
-
-  * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
-    Replace
-    ```go
-    attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
-    ```
-    with
-    ```go
-    attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
-    ```
-
-  * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
-    Replace
-    ```go
-    attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
-    ```
-    with
-    ```go
-    attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
-    ```
-
-  * ObjectHandle.WithConditions replaced by ObjectHandle.If.
-    Replace
-    ```go
-    obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
-    ```
-    with
-    ```go
-    obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
-    ```
-
-    Replace
-    ```go
-    obj.WithConditions(storage.IfGenerationMatch(0))
-    ```
-    with
-    ```go
-    obj.If(storage.Conditions{DoesNotExist: true})
-    ```
-
-  * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
-
-- Package preview/logging deleted. Use logging instead.
-
-## v0.2.0
-
-- Logging client replaced with preview version (see below).
-
-- New clients for some of Google's Machine Learning APIs: Vision, Speech, and
-Natural Language.
-
-- Preview version of a new [Stackdriver Logging][cloud-logging] client in
-[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
-This client uses gRPC as its transport layer, and supports log reading, sinks
-and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
diff --git a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
deleted file mode 100644
index 8fd1bc9c2..000000000
--- a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Contributor Code of Conduct
-
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing others' private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
-
diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md
deleted file mode 100644
index 6d6e48b65..000000000
--- a/vendor/cloud.google.com/go/CONTRIBUTING.md
+++ /dev/null
@@ -1,327 +0,0 @@
-# Contributing
-
-1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
-   The issue will be used to discuss the bug or feature and should be created
-   before sending a PR.
-
-1. [Install Go](https://golang.org/dl/).
-   1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
-      is in your `PATH`.
-   1. Check that it's working by running `go version`.
-      * If it doesn't work, check that the install location (usually
-        `/usr/local/go`) is on your `PATH`.
-
-1. Sign one of the
-   [contributor license agreements](#contributor-license-agreements) below.
-
-1. Clone the repo:
-   `git clone https://github.com/googleapis/google-cloud-go`
-
-1. Change into the checked out source:
-   `cd google-cloud-go`
-
-1. Fork the repo.
-
-1. Set your fork as a remote:
-   `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
-
-1. Make changes, commit to your fork.
-
-   Commit messages should follow the
-   [Conventional Commits Style](https://www.conventionalcommits.org). The scope
-   portion should always be filled with the name of the package affected by the
-   changes being made. For example:
-   ```
-   feat(functions): add gophers codelab
-   ```
-
-1. Send a pull request with your changes.
-
-   To minimize friction, consider setting `Allow edits from maintainers` on the
-   PR, which will enable project committers and automation to update your PR.
-
-1. A maintainer will review the pull request and make comments.
-
-   Prefer adding additional commits over amending and force-pushing since it can
-   be difficult to follow code reviews when the commit history changes.
-
-   Commits will be squashed when they're merged.
-
-## Testing
-
-We test code against two versions of Go, the minimum and maximum versions
-supported by our clients. To see which versions these are, check out our
-[README](README.md#supported-versions).
-
-### Integration Tests
-
-In addition to the unit tests, you may run the integration test suite. These
-directions describe setting up your environment to run integration tests for
-_all_ packages: note that many of these instructions may be redundant if you
-intend only to run integration tests on a single package.
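-Most integration tests in this repo follow a common pattern: they skip
-themselves unless the environment described in the setup steps below is
-configured. The following is a hypothetical sketch of that pattern (the
-package and test names are illustrative, not taken from this repo):
-
-```go
-package mypkg_test // hypothetical package
-
-import (
-    "os"
-    "testing"
-)
-
-func TestIntegration_Sketch(t *testing.T) {
-    // Integration tests are skipped in -short mode, which is also how
-    // replay-based runs are selected (see the Replay section below).
-    if testing.Short() {
-        t.Skip("skipping integration test in short mode")
-    }
-    // Without a configured project there is nothing to test against.
-    if os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID") == "" {
-        t.Skip("GCLOUD_TESTS_GOLANG_PROJECT_ID not set")
-    }
-    // ... create a client with the configured credentials and exercise the API.
-}
-```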
-
-#### GCP Setup
-
-To run the integration tests, you must create and configure two projects in
-the Google Developers Console: one specifically for Firestore integration
-tests, and another for all other integration tests. We'll refer to these
-projects as the "general project" and the "Firestore project".
-
-After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
-for each project. Ensure the project-level **Owner**
-[IAM role](https://console.cloud.google.com/iam-admin/iam/project) is added to
-each service account. During the creation of the service account, you should
-download the JSON credential file for use later.
-
-Next, ensure the following APIs are enabled in the general project:
-
-- BigQuery API
-- BigQuery Data Transfer API
-- Cloud Dataproc API
-- Cloud Dataproc Control API Private
-- Cloud Datastore API
-- Cloud Firestore API
-- Cloud Key Management Service (KMS) API
-- Cloud Natural Language API
-- Cloud OS Login API
-- Cloud Pub/Sub API
-- Cloud Resource Manager API
-- Cloud Spanner API
-- Cloud Speech API
-- Cloud Translation API
-- Cloud Video Intelligence API
-- Cloud Vision API
-- Compute Engine API
-- Compute Engine Instance Group Manager API
-- Container Registry API
-- Firebase Rules API
-- Google Cloud APIs
-- Google Cloud Deployment Manager V2 API
-- Google Cloud SQL
-- Google Cloud Storage
-- Google Cloud Storage JSON API
-- Google Compute Engine Instance Group Updater API
-- Google Compute Engine Instance Groups API
-- Kubernetes Engine API
-- Cloud Error Reporting API
-- Pub/Sub Lite API
-
-Next, create a Datastore database in the general project, and a Firestore
-database in the Firestore project.
-
-Finally, in the general project, create an API key for the translate API:
-
-- Go to GCP Developer Console.
-- Navigate to APIs & Services > Credentials.
-- Click Create Credentials > API Key.
-- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
-
-#### Local Setup
-
-Once the two projects are created and configured, set the following environment
-variables:
-
-- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
-bamboo-shift-455) for the general project.
-- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
-project's service account.
-- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
-(e.g. doorway-cliff-677) for the Firestore project.
-- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
-Firestore project's service account.
-- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
-
-As part of the setup that follows, the following variables will be configured:
-
-- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
-in the form
-"projects/P/locations/L/keyRings/R". The creation of this is described below.
-- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
-in the form
-"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
-- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
-
-Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
-create some resources used in integration tests.
-
-From the project's root directory:
-
-``` sh
-# Sets the default project in your env.
-$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
-
-# Authenticates the gcloud tool with your account.
-$ gcloud auth login - -# Create the indexes used in the datastore integration tests. -$ gcloud datastore indexes create datastore/testdata/index.yaml - -# Creates a Google Cloud storage bucket with the same name as your test project, -# and with the Cloud Logging service account as owner, for the sink -# integration tests in logging. -$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID -$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Creates a PubSub topic for integration tests of storage notifications. -$ gcloud beta pubsub topics create go-storage-notification-test -# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user -# "service-@gs-project-accounts.iam.gserviceaccount.com" -# as a publisher to that topic. - -# Creates a Spanner instance for the spanner integration tests. -$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' -# NOTE: Spanner instances are priced by the node-hour, so you may want to -# delete the instance after testing with 'gcloud beta spanner instances delete'. - -$ export MY_KEYRING=some-keyring-name -$ export MY_LOCATION=global -$ export MY_SINGLE_LOCATION=us-central1 -# Creates a KMS keyring, in the same location as the default location for your -# project's buckets. -$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION -# Creates two keys in the keyring, named key1 and key2. -$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption -$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption -# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. -$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING -# Authorizes Google Cloud Storage to encrypt and decrypt using key1. -$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 - -# Create KMS Key in one region for Bigtable -$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION -$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption -# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable. -$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING -# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud: -$ gcloud beta services identity create \ - --service=bigtableadmin.googleapis.com \ - --project $GCLOUD_TESTS_GOLANG_PROJECT_ID -# Note the service agent email for the agent created. -$ export SERVICE_AGENT_EMAIL= - -# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1 -$ gcloud kms keys add-iam-policy-binding key1 \ - --keyring $MY_KEYRING \ - --location $MY_SINGLE_LOCATION \ - --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ - --member "serviceAccount:$SERVICE_AGENT_EMAIL" \ - --project $GCLOUD_TESTS_GOLANG_PROJECT_ID -``` - -It may be useful to add exports to your shell initialization for future use. -For instance, in `.zshrc`: - -```sh -#### START GO SDK Test Variables -# Developers Console project's ID (e.g. bamboo-shift-455) for the general project. -export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project - -# The path to the JSON key file of the general project's service account. 
-export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
-
-# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
-export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
-
-# The path to the JSON key file of the Firestore project's service account.
-export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
-
-# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
-# The creation of this is described above.
-export MY_KEYRING=my-golang-sdk-test
-export MY_LOCATION=global
-export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
-
-# API key for using the Translate API.
-export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
-
-# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
-export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
-#### END GO SDK Test Variables
-```
-
-#### Running
-
-Once you've done the necessary setup, you can run the integration tests by
-running:
-
-``` sh
-$ go test -v ./...
-```
-
-Note that the above command will not run the tests in other modules. To run
-tests on other modules, first navigate to the appropriate
-subdirectory. For instance, to run only the tests for datastore:
-``` sh
-$ cd datastore
-$ go test -v ./...
-```
-
-#### Replay
-
-Some packages can record the RPCs during integration tests to a file for
-subsequent replay. To record, pass the `-record` flag to `go test`. The
-recording will be saved to the _package_`.replay` file. To replay integration
-tests from a saved recording, the replay file must be present, the `-short`
-flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
-environment variable must have a non-empty value.
-
-## Contributor License Agreements
-
-Before we can accept your pull requests you'll need to sign a Contributor
-License Agreement (CLA):
-
-- **If you are an individual writing original source code** and **you own the
-intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your
-work**, then you'll need to sign a [corporate CLA][corpcla].
-
-You can sign these electronically (just scroll to the bottom). After that,
-we'll be able to accept your pull requests.
-
-## Contributor Code of Conduct
-
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing others' private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
- -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0, -available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md deleted file mode 100644 index 01453cc69..000000000 --- a/vendor/cloud.google.com/go/README.md +++ /dev/null @@ -1,139 +0,0 @@ -# Google Cloud Client Libraries for Go - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go) - -Go packages for [Google Cloud Platform](https://cloud.google.com) services. - -``` go -import "cloud.google.com/go" -``` - -To install the packages on your system, *do not clone the repo*. Instead: - -1. Change to your project directory: - - ```bash - cd /my/cloud/project - ``` -1. Get the package you want to use. Some products have their own module, so it's - best to `go get` the package(s) you want to use: - - ``` - $ go get cloud.google.com/go/firestore # Replace with the package you want to use. - ``` - -**NOTE:** Some of these packages are under development, and may occasionally -make backwards-incompatible changes. - -## Supported APIs - -For an updated list of all of our released APIs please see our -[reference docs](https://cloud.google.com/go/docs/reference). - -## [Go Versions Supported](#supported-versions) - -Our libraries are compatible with at least the three most recent, major Go -releases. They are currently compatible with: - -- Go 1.19 -- Go 1.18 -- Go 1.17 -- Go 1.16 -- Go 1.15 - -## Authorization - -By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) -for authorization credentials used in calling the API endpoints. This will allow your -application to run in many environments without requiring explicit configuration. - -[snip]:# (auth) -```go -client, err := storage.NewClient(ctx) -``` - -To authorize using a -[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), -pass -[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile) -to the `NewClient` function of the desired package. 
For example: - -[snip]:# (auth-JSON) -```go -client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) -``` - -You can exert more control over authorization by using the -[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. Then pass -[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) -to the `NewClient` function: -[snip]:# (auth-ts) -```go -tokenSource := ... -client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) -``` - -## Contributing - -Contributions are welcome. Please, see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. - -[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory -[cloud-automl]: https://cloud.google.com/automl -[cloud-build]: https://cloud.google.com/cloud-build/ -[cloud-bigquery]: https://cloud.google.com/bigquery/ -[cloud-bigtable]: https://cloud.google.com/bigtable/ -[cloud-compute]: https://cloud.google.com/compute -[cloud-container]: https://cloud.google.com/containers/ -[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis -[cloud-dataproc]: https://cloud.google.com/dataproc/ -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/ -[cloud-debugger]: https://cloud.google.com/debugger/ -[cloud-dlp]: https://cloud.google.com/dlp/ -[cloud-errors]: https://cloud.google.com/error-reporting/ -[cloud-firestore]: https://cloud.google.com/firestore/ -[cloud-iam]: https://cloud.google.com/iam/ -[cloud-iot]: https://cloud.google.com/iot-core/ -[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts -[cloud-kms]: https://cloud.google.com/kms/ -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsublite]: https://cloud.google.com/pubsub/lite -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-language]: https://cloud.google.com/natural-language -[cloud-logging]: https://cloud.google.com/logging/ -[cloud-natural-language]: https://cloud.google.com/natural-language/ -[cloud-memorystore]: https://cloud.google.com/memorystore/ -[cloud-monitoring]: https://cloud.google.com/monitoring/ -[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest -[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/ -[cloud-securitycenter]: https://cloud.google.com/security-command-center/ -[cloud-scheduler]: https://cloud.google.com/scheduler -[cloud-spanner]: https://cloud.google.com/spanner/ -[cloud-speech]: https://cloud.google.com/speech -[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ -[cloud-tasks]: https://cloud.google.com/tasks/ -[cloud-texttospeech]: https://cloud.google.com/texttospeech/ -[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ -[cloud-trace]: https://cloud.google.com/trace/ -[cloud-translate]: https://cloud.google.com/translate -[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/ -[cloud-recommender]: https://cloud.google.com/recommendations/ -[cloud-video]: 
https://cloud.google.com/video-intelligence/ -[cloud-vision]: https://cloud.google.com/vision -[cloud-webrisk]: https://cloud.google.com/web-risk/ - -## Links - -- [Go on Google Cloud](https://cloud.google.com/go/home) -- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started) -- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart) -- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go) -- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go) diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md deleted file mode 100644 index 6d0fcf4f9..000000000 --- a/vendor/cloud.google.com/go/RELEASING.md +++ /dev/null @@ -1,141 +0,0 @@ -# Releasing - -## Determine which module to release - -The Go client libraries have several modules. A module does not strictly -correspond to a single library; it corresponds to a tree of directories. If a -file needs to be released, you must release the closest ancestor module. - -To see all modules: - -```bash -$ cat `find . -name go.mod` | grep module -module cloud.google.com/go/pubsub -module cloud.google.com/go/spanner -module cloud.google.com/go -module cloud.google.com/go/bigtable -module cloud.google.com/go/bigquery -module cloud.google.com/go/storage -module cloud.google.com/go/pubsublite -module cloud.google.com/go/firestore -module cloud.google.com/go/logging -module cloud.google.com/go/internal/gapicgen -module cloud.google.com/go/internal/godocfx -module cloud.google.com/go/internal/examples/fake -module cloud.google.com/go/internal/examples/mock -module cloud.google.com/go/datastore -``` - -The `cloud.google.com/go` module is the repository root module. Every other -module is a submodule. - -So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest -ancestor module is `cloud.google.com/go/bigtable`, so you should release a new -version of the `cloud.google.com/go/bigtable` submodule. - -If you need to release a change in `asset/apiv1/asset_client.go`, the closest -ancestor module is `cloud.google.com/go`, so you should release a new version -of the `cloud.google.com/go` repository root module. Note: releasing -`cloud.google.com/go` has no impact on any of the submodules, and vice-versa. -They are released entirely independently. - -## Test failures - -If there are any test failures in the Kokoro build, releases are blocked until -the failures have been resolved. - -## How to release - -### Automated Releases (`cloud.google.com/go` and submodules) - -We now use [release-please](https://github.com/googleapis/release-please) to -perform automated releases for `cloud.google.com/go` and all submodules. - -1. If there are changes that have not yet been released, a - [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will - be automatically opened by release-please - with a title like "chore: release X.Y.Z" (for the root module) or - "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z - is the next version to be released. Find the desired pull request - [here](https://github.com/googleapis/google-cloud-go/pulls). -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are - any failures in the most recent build, address them before proceeding with - the release. (This applies even if the failures are in a different submodule - from the one being released.) -1. 
Review the release notes. These are automatically generated from the titles - of any merged commits since the previous release. If you would like to edit - them, do so by editing the release PR. -1. To cut a release, approve and merge the pull request. Doing so will - update the `CHANGES.md`, tag the merged commit with the appropriate version, - and draft a GitHub release which will copy the notes from `CHANGES.md`. - -### Manual Release (`cloud.google.com/go`) - -If for whatever reason the automated release process is not working as expected, -here is how to manually cut a release of `cloud.google.com/go`. - -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are - any failures in the most recent build, address them before proceeding with - the release. -1. Navigate to `google-cloud-go/` and switch to main. -1. `git pull` -1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. - The current latest tag `$CV` is the largest tag. It should look something - like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags; these are tags for a - specific library, not the module root). We'll call the current version `$CV` - and the new version `$NV`. -1. On main, run `git log $CV...` to list all the changes since the last - release. NOTE: You must manually filter out changes to submodules [1] - (the `git log` will show changes in submodules, which are not part of - your release). -1. Edit `CHANGES.md` to include a summary of the changes. -1. In `internal/version/version.go`, update `const Repo` to today's date with - the format `YYYYMMDD`. -1. In `internal/version` run `go generate`. -1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, - and create a PR titled `chore: release $NV`. -1. Wait for the PR to be reviewed and merged. Once it's merged, and without - merging any other PRs in the meantime: - a. Switch to main. - b. `git pull` - c. Tag the repo with the next version: `git tag $NV`. - d. Push the tag to origin: - `git push origin $NV` -1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) - with the new release, copying the contents of `CHANGES.md`. - -### Manual Releases (submodules) - -If for whatever reason the automated release process is not working as expected, -here is how to manually cut a release of a submodule. - -(These instructions assume we're releasing `cloud.google.com/go/datastore`; adjust accordingly.) - -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are - any failures in the most recent build, address them before proceeding with - the release. (This applies even if the failures are in a different submodule - from the one being released.) -1. Navigate to `google-cloud-go/` and switch to main. -1. `git pull` -1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all - existing releases. The current latest tag `$CV` is the largest tag. It - should look something like `datastore/vX.Y.Z`. We'll call the current version - `$CV` and the new version `$NV`. -1. On main, run `git log $CV.. -- datastore/` to list all the changes to the - submodule directory since the last release. -1. Edit `datastore/CHANGES.md` to include a summary of the changes. -1. In `internal/version` run `go generate`. -1. Commit the changes, ignoring the generated `.go-r` file. 
Push to your fork, - and create a PR titled `chore(datastore): release $NV`. -1. Wait for the PR to be reviewed and merged. Once it's merged, and without - merging any other PRs in the meantime: - a. Switch to main. - b. `git pull` - c. Tag the repo with the next version: `git tag $NV`. - d. Push the tag to origin: - `git push origin $NV` -1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) - with the new release, copying the contents of `datastore/CHANGES.md`. diff --git a/vendor/cloud.google.com/go/SECURITY.md b/vendor/cloud.google.com/go/SECURITY.md deleted file mode 100644 index 8b58ae9c0..000000000 --- a/vendor/cloud.google.com/go/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). - -The Google Security Team will respond within 5 working days of your report on g.co/vulnz. - -We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue. diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/cloud.google.com/go/compute/internal/version.go similarity index 60% rename from vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go rename to vendor/cloud.google.com/go/compute/internal/version.go index d10ad6653..ddddbd21f 100644 --- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package field_mask aliases all exported identifiers in -// package "google.golang.org/protobuf/types/known/fieldmaskpb". -package field_mask +package internal -import "google.golang.org/protobuf/types/known/fieldmaskpb" - -type FieldMask = fieldmaskpb.FieldMask - -var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto +// Version is the current tagged release of the library. +const Version = "1.18.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 000000000..06b957349 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,19 @@ +# Changes + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata as its own module. 
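The two bug fixes in this changelog map to small, concrete changes in `metadata.go` further down in this patch: an `IdleConnTimeout` (plus an overall client `Timeout`) on the default HTTP client, and a trailing dot on the hostname used by the DNS probe. The trailing dot makes the name fully qualified, so the resolver does not expand it with search domains from `/etc/resolv.conf`, which could otherwise cause a non-GCE host to resolve the name and be mistaken for GCE. Below is a minimal sketch of the DNS half of the probe, using only the Go standard library; the helper name `onGCEByDNS` is hypothetical, and the library's actual `testOnGCE` runs this lookup concurrently with an HTTP check against the metadata server.

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// onGCEByDNS is a simplified, illustrative version of the DNS half of the
// on-GCE probe. The trailing dot in "metadata.google.internal." marks the
// name as absolute, so the resolver skips search-domain expansion (it will
// not also try e.g. "metadata.google.internal.corp.example.com").
func onGCEByDNS(ctx context.Context) bool {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	resolver := &net.Resolver{}
	addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
	return err == nil && len(addrs) > 0
}

func main() {
	fmt.Println("on GCE (DNS probe):", onGCEByDNS(context.Background()))
}
```
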
diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 000000000..f940fb2c8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with the Google Cloud metadata +service on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1405d0967..c17faa142 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,7 +70,9 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, + Timeout: 5 * time.Second, } } @@ -145,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go similarity index 85% rename from vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/compute/metadata/tidyfix.go index c3f49e045..4cef48500 100644 --- a/vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. //go:build modhack // +build modhack -package iam +package metadata // Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go deleted file mode 100644 index 871645ef1..000000000 --- a/vendor/cloud.google.com/go/doc.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package cloud is the root of the packages used to access Google Cloud -Services. See https://godoc.org/cloud.google.com/go for a full list -of sub-packages. - -# Client Options - -All clients in sub-packages are configurable via client options. These options are -described here: https://godoc.org/google.golang.org/api/option. - -# Authentication and Authorization - -All the clients in sub-packages support authentication via Google Application Default -Credentials (see https://cloud.google.com/docs/authentication/production), or -by providing a JSON key file for a Service Account. See examples below. - -Google Application Default Credentials (ADC) is the recommended way to authorize -and authenticate clients. For information on how to create and obtain -Application Default Credentials, see -https://cloud.google.com/docs/authentication/production. Here is an example -of a client using ADC to authenticate: - - client, err := secretmanager.NewClient(context.Background()) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. - -You can use a file with credentials to authenticate and authorize, such as a JSON -key file associated with a Google service account. Service Account keys can be -created and downloaded from -https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses -the Secret Manager client, but the same steps apply to the other client libraries -underneath this package. Example: - - client, err := secretmanager.NewClient(context.Background(), - option.WithCredentialsFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. - -In some cases (for instance, you don't want to store secrets on disk), you can -create credentials from in-memory JSON and use the WithCredentials option. -The google package in this example is at golang.org/x/oauth2/google. -This example uses the Secret Manager client, but the same steps apply to -the other client libraries underneath this package. Note that scopes can be -found at https://developers.google.com/identity/protocols/oauth2/scopes, and -are also provided in all auto-generated libraries: for example, -cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: - - ctx := context.Background() - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) - if err != nil { - // TODO: handle error. 
- } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. - -# Timeouts and Cancellation - -By default, non-streaming methods, like Create or Get, will have a default deadline applied to the -context provided at call time, unless a context deadline is already set. Streaming -methods have no default deadline and will run indefinitely. To set timeouts or -arrange for cancellation, use contexts. Transient -errors will be retried when correctness allows. - -Here is an example of how to set a timeout for an RPC using context.WithTimeout: - - ctx := context.Background() - // Do not set a timeout on the context passed to NewClient: dialing happens - // asynchronously, and the context is used to refresh credentials in the - // background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - // Time out if it takes more than 10 seconds to delete the secret. - tctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() // Always call cancel. - - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} - if err := client.DeleteSecret(tctx, req); err != nil { - // TODO: handle error. - } - -Here is an example of how to arrange for an RPC to be canceled using context.WithCancel: - - ctx := context.Background() - // Do not cancel the context passed to NewClient: dialing happens asynchronously, - // and the context is used to refresh credentials in the background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - cctx, cancel := context.WithCancel(ctx) - defer cancel() // Always call cancel. - - // TODO: Make the cancel function available to whatever might want to cancel the - // call--perhaps a GUI button. - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} - if err := client.DeleteSecret(cctx, req); err != nil { - // TODO: handle error. - } - -To opt out of default deadlines, set the temporary environment variable -GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client -creation. This affects all Google Cloud Go client libraries. This opt-out -mechanism will be removed in a future release. File an issue at -https://github.com/googleapis/google-cloud-go if the default deadlines -cannot work for you. - -Do not attempt to control the initial connection (dialing) of a service by setting a -timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts -would be ineffective and would only interfere with credential refreshing, which uses -the same context. - -# Connection Pooling - -Connection pooling differs in clients based on their transport. Cloud -clients either rely on HTTP or gRPC transports to communicate -with Google Cloud. - -Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the -underlying HTTP transport to cache connections for later re-use. These are cached to -the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in -http.DefaultTransport. - -For gRPC clients (all others in this repo), connection pooling is configurable. Users -of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client -option to NewClient calls. This configures the underlying gRPC connections to be -pooled and addressed in a round-robin fashion. - -# Using the Libraries with Docker - -Minimal docker images like Alpine lack CA certificates. 
This causes RPCs to appear to -hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 -for more information. - -# Debugging - -To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See -https://godoc.org/google.golang.org/grpc/grpclog for more information. - -For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". - -# Inspecting errors - -Most of the errors returned by the generated clients are wrapped in an -`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror) -and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending -on the transport used to make the call (gRPC or REST). Converting your errors to -these types can be a useful way to get more information about what went wrong -while debugging. - -`apierror.APIError` gives access to specific details in the -error. The transport-specific errors can still be unwrapped using the -`apierror.APIError`. - - if err != nil { - var ae *apierror.APIError - if errors.As(err, &ae) { - log.Println(ae.Reason()) - log.Println(ae.Details().Help.GetLinks()) - } - } - -If the gRPC transport was used, the `grpc.Status` can still be parsed using the -`status.FromError` function. - - if err != nil { - if s, ok := status.FromError(err); ok { - log.Println(s.Message()) - for _, d := range s.Proto().Details { - log.Println(d) - } - } - } - -If the REST transport was used, the `googleapi.Error` can be parsed in a similar -way. - - if err != nil { - var gerr *googleapi.Error - if errors.As(err, &gerr) { - log.Println(gerr.Message) - } - } - -# Client Stability - -Clients in this repository are considered alpha or beta unless otherwise -marked as stable in the README.md. Semver is not used to communicate stability -of clients. - -Alpha and beta clients may change or go away without notice. - -Clients marked stable will maintain compatibility with future versions for as -long as we can reasonably sustain. Incompatible changes might be made in some -situations, including: - -- Security bugs may prompt backwards-incompatible changes. - -- Situations in which components are no longer feasible to maintain without -making breaking changes, including removal. - -- Parts of the client surface may be outright unstable and subject to change. -These parts of the surface will be labeled with the note, "It is EXPERIMENTAL -and subject to change or removal without notice." 
-*/ -package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 12f1167f1..9d39f9806 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,68 @@ # Changes +## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) + + +### Features + +* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb)) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16) + + +### Features + +* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8)) + +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04) + + +### Features + +* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15) + + +### Features + +* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca)) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) + + +### Features + +* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) + + +### Features + +* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) + + +### Features + +* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 6fbf54f44..9ef7373d2 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ 
b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/iam/v1/iam_policy.proto -package iam +package iampb import ( context "context" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index abea46d9b..026c115c2 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/iam/v1/options.proto -package iam +package iampb import ( reflect "reflect" diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go similarity index 94% rename from vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go rename to vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 5869d9207..16bed436c 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/iam/v1/policy.proto -package iam +package iampb import ( reflect "reflect" @@ -279,11 +279,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // * Getting a policy that includes a conditional role binding - // * Adding a conditional role binding to a policy - // * Changing a conditional role binding in a policy - // * Removing any role binding, with or without a condition, from a policy - // that includes conditions + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -396,47 +396,43 @@ type Binding struct { // Specifies the principals requesting access for a Cloud Platform resource. // `members` can have the following values: // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . 
+ // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . // + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. 
// diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go index 0a06ea2e8..f004a7afb 100644 --- a/vendor/cloud.google.com/go/iam/iam.go +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -26,8 +26,8 @@ import ( "fmt" "time" + pb "cloud.google.com/go/iam/apiv1/iampb" gax "github.com/googleapis/gax-go/v2" - pb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index be8372087..2dd0e1814 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -251,6 +251,15 @@ "release_level": "ga", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", + "description": "Analytics Hub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/connection/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", @@ -278,6 +287,24 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/bigquery/datapolicies/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1", + "description": "BigQuery Data Policy API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", + "description": "BigQuery Data Policy API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", "description": "BigQuery Data Transfer API", @@ -314,15 +341,6 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/bigquery/reservation/apiv1beta1": { - "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1", - "description": "BigQuery Reservation API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/bigquery/storage/apiv1": { "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", "description": "BigQuery Storage API", @@ -410,7 +428,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/channel/apiv1": { @@ -530,6 +548,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + 
"cloud.google.com/go/datacatalog/lineage/apiv1": { + "distribution_name": "cloud.google.com/go/datacatalog/lineage/apiv1", + "description": "Data Lineage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dataflow/apiv1beta3": { "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", "description": "Dataflow API", @@ -548,6 +575,15 @@ "release_level": "alpha", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/dataform/apiv1beta1": { + "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/datafusion/apiv1": { "distribution_name": "cloud.google.com/go/datafusion/apiv1", "description": "Cloud Data Fusion API", @@ -656,6 +692,15 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/dialogflow/apiv2beta1": { + "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dialogflow/cx/apiv3": { "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", "description": "Dialogflow API", @@ -674,6 +719,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/discoveryengine/apiv1beta": { + "distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta", + "description": "Discovery Engine API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dlp/apiv2": { "distribution_name": "cloud.google.com/go/dlp/apiv2", "description": "Cloud Data Loss Prevention (DLP) API", @@ -710,6 +764,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/edgecontainer/apiv1": { + "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", + "description": "Distributed Cloud Edge Container API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", @@ -899,6 +962,24 @@ "release_level": "ga", "library_type": "CORE" }, + "cloud.google.com/go/iam/apiv1": { + "distribution_name": "cloud.google.com/go/iam/apiv1", + "description": "IAM Meta API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/iam/apiv2": { + "distribution_name": "cloud.google.com/go/iam/apiv2", + "description": "Identity and Access Management (IAM) API", + "language": 
"Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/iam/credentials/apiv1": { "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account Credentials API", @@ -955,7 +1036,7 @@ }, "cloud.google.com/go/language/apiv1beta2": { "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Google Cloud Natural Language API", + "description": "Cloud Natural Language API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", @@ -994,7 +1075,7 @@ "description": "Long Running Operations API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/longrunning/autogen", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", "release_level": "alpha", "library_type": "GAPIC_AUTO" }, @@ -1007,6 +1088,33 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/maps/addressvalidation/apiv1": { + "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", + "description": "Address Validation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { + "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", + "description": "Maps Platform Datasets API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", + "release_level": "alpha", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/routing/apiv2": { + "distribution_name": "cloud.google.com/go/maps/routing/apiv2", + "description": "Routes API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/mediatranslation/apiv1beta1": { "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", "description": "Media Translation API", @@ -1292,7 +1400,7 @@ "language": "Go", "client_library_type": "manual", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { @@ -1304,22 +1412,22 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1beta1", - "release_level": "beta", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { - "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", + "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", "description": "reCAPTCHA Enterprise API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", - "release_level": "ga", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", + "release_level": "beta", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { @@ -1466,15 +1574,6 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/secretmanager/apiv1beta1": { - "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", - "description": "Secret Manager API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta1", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/security/privateca/apiv1": { "distribution_name": "cloud.google.com/go/security/privateca/apiv1", "description": "Certificate Authority API", @@ -1493,6 +1592,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/security/publicca/apiv1beta1": { + "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", + "description": "Public Certificate Authority API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/securitycenter/apiv1": { "distribution_name": "cloud.google.com/go/securitycenter/apiv1", "description": "Security Command Center API", @@ -1637,6 +1745,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/speech/apiv2": { + "distribution_name": "cloud.google.com/go/speech/apiv2", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/storage": { "distribution_name": "cloud.google.com/go/storage", "description": "Cloud Storage (GCS)", @@ -1781,22 +1898,22 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/apiv1p1beta1": { - "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", + "cloud.google.com/go/vision/v2/apiv1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1p1beta1", - "release_level": "beta", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/vision/v2/apiv1": { - "distribution_name": "cloud.google.com/go/vision/v2/apiv1", 
+ "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", - "release_level": "ga", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", + "release_level": "beta", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmmigration/apiv1": { @@ -1808,6 +1925,15 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/vmwareengine/apiv1": { + "distribution_name": "cloud.google.com/go/vmwareengine/apiv1", + "description": "VMware Engine API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/vpcaccess/apiv1": { "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", "description": "Serverless VPC Access API", diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md index 8857c8f6f..c1dc6bdff 100644 --- a/vendor/cloud.google.com/go/internal/README.md +++ b/vendor/cloud.google.com/go/internal/README.md @@ -15,4 +15,29 @@ One day, we may want to create individual `.repo-metadata.json` files next to each package, which is the pattern followed by some other languages. External tools would then talk to pkg.go.dev or some other service to get the overall list of packages and use the `.repo-metadata.json` files to get the additional -metadata required. For now, `.repo-metadata-full.json` includes everything. \ No newline at end of file +metadata required. For now, `.repo-metadata-full.json` includes everything. + +## cloudbuild.yaml + +To kick off a build locally run from the repo root: + +```bash +gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml +``` + +### Updating OwlBot SHA + +You may want to manually update the which version of the post processor will be +used -- to do this you need to update the SHA in the OwlBot lock file. Start by +running the following commands: + +```bash +docker pull gcr.io/cloud-devrel-public-resources/owlbot-go:latest +docker inspect --format='{{index .RepoDigests 0}}' gcr.io/cloud-devrel-public-resources/owlbot-go:latest +``` + +This will give you a SHA. You can use this value to update the value in +`.github/.OwlBot.lock.yaml`. + +*Note*: OwlBot will eventually open a pull request to update this value if it +discovers a new version of the container. diff --git a/vendor/cloud.google.com/go/internal/cloudbuild.yaml b/vendor/cloud.google.com/go/internal/cloudbuild.yaml new file mode 100644 index 000000000..71281cec2 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloudbuild.yaml @@ -0,0 +1,25 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# note: /workspace is a special directory in the docker image where all the files in this folder +# get placed on your behalf + +timeout: 7200s # 2 hours +steps: +- name: gcr.io/cloud-builders/docker + args: ['build', '-t', 'gcr.io/cloud-devrel-public-resources/owlbot-go', '-f', 'postprocessor/Dockerfile', '.'] + dir: internal + +images: +- gcr.io/cloud-devrel-public-resources/owlbot-go:latest diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json deleted file mode 100644 index fe028df58..000000000 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ /dev/null @@ -1,334 +0,0 @@ -{ - "release-type": "go-yoshi", - "include-component-in-tag": true, - "tag-separator": "/", - "packages": { - "accessapproval": { - "component": "accessapproval" - }, - "accesscontextmanager": { - "component": "accesscontextmanager" - }, - "aiplatform": { - "component": "aiplatform" - }, - "analytics": { - "component": "analytics" - }, - "apigateway": { - "component": "apigateway" - }, - "apigeeconnect": { - "component": "apigeeconnect" - }, - "apigeeregistry": { - "component": "apigeeregistry" - }, - "apikeys": { - "component": "apikeys" - }, - "appengine": { - "component": "appengine" - }, - "area120": { - "component": "area120" - }, - "artifactregistry": { - "component": "artifactregistry" - }, - "asset": { - "component": "asset" - }, - "assuredworkloads": { - "component": "assuredworkloads" - }, - "automl": { - "component": "automl" - }, - "baremetalsolution": { - "component": "baremetalsolution" - }, - "batch": { - "component": "batch" - }, - "beyondcorp": { - "component": "beyondcorp" - }, - "billing": { - "component": "billing" - }, - "binaryauthorization": { - "component": "binaryauthorization" - }, - "certificatemanager": { - "component": "certificatemanager" - }, - "channel": { - "component": "channel" - }, - "cloudbuild": { - "component": "cloudbuild" - }, - "clouddms": { - "component": "clouddms" - }, - "cloudtasks": { - "component": "cloudtasks" - }, - "compute": { - "component": "compute" - }, - "contactcenterinsights": { - "component": "contactcenterinsights" - }, - "container": { - "component": "container" - }, - "containeranalysis": { - "component": "containeranalysis" - }, - "datacatalog": { - "component": "datacatalog" - }, - "dataflow": { - "component": "dataflow" - }, - "dataform": { - "component": "dataform" - }, - "datafusion": { - "component": "datafusion" - }, - "datalabeling": { - "component": "datalabeling" - }, - "dataplex": { - "component": "dataplex" - }, - "dataproc": { - "component": "dataproc" - }, - "dataqna": { - "component": "dataqna" - }, - "datastream": { - "component": "datastream" - }, - "deploy": { - "component": "deploy" - }, - "dialogflow": { - "component": "dialogflow" - }, - "dlp": { - "component": "dlp" - }, - "documentai": { - "component": "documentai" - }, - "domains": { - "component": "domains" - }, - "essentialcontacts": { - "component": "essentialcontacts" - }, - "eventarc": { - "component": "eventarc" - }, - "filestore": { - "component": "filestore" - }, - "functions": { - "component": "functions" - }, - "gaming": { - "component": "gaming" - }, - "gkebackup": { - "component": "gkebackup" - }, - "gkeconnect": { - "component": "gkeconnect" - }, - "gkehub": { - "component": "gkehub" - }, - "gkemulticloud": { - "component": 
"gkemulticloud" - }, - "grafeas": { - "component": "grafeas" - }, - "gsuiteaddons": { - "component": "gsuiteaddons" - }, - "iam": { - "component": "iam" - }, - "iap": { - "component": "iap" - }, - "ids": { - "component": "ids" - }, - "iot": { - "component": "iot" - }, - "kms": { - "component": "kms" - }, - "language": { - "component": "language" - }, - "lifesciences": { - "component": "lifesciences" - }, - "managedidentities": { - "component": "managedidentities" - }, - "mediatranslation": { - "component": "mediatranslation" - }, - "memcache": { - "component": "memcache" - }, - "metastore": { - "component": "metastore" - }, - "monitoring": { - "component": "monitoring" - }, - "networkconnectivity": { - "component": "networkconnectivity" - }, - "networkmanagement": { - "component": "networkmanagement" - }, - "networksecurity": { - "component": "networksecurity" - }, - "notebooks": { - "component": "notebooks" - }, - "optimization": { - "component": "optimization" - }, - "orchestration": { - "component": "orchestration" - }, - "orgpolicy": { - "component": "orgpolicy" - }, - "osconfig": { - "component": "osconfig" - }, - "oslogin": { - "component": "oslogin" - }, - "phishingprotection": { - "component": "phishingprotection" - }, - "policytroubleshooter": { - "component": "policytroubleshooter" - }, - "privatecatalog": { - "component": "privatecatalog" - }, - "recaptchaenterprise/v2": { - "component": "recaptchaenterprise" - }, - "recommendationengine": { - "component": "recommendationengine" - }, - "recommender": { - "component": "recommender" - }, - "redis": { - "component": "redis" - }, - "resourcemanager": { - "component": "resourcemanager" - }, - "resourcesettings": { - "component": "resourcesettings" - }, - "retail": { - "component": "retail" - }, - "run": { - "component": "run" - }, - "scheduler": { - "component": "scheduler" - }, - "secretmanager": { - "component": "secretmanager" - }, - "security": { - "component": "security" - }, - "securitycenter": { - "component": "securitycenter" - }, - "servicecontrol": { - "component": "servicecontrol" - }, - "servicedirectory": { - "component": "servicedirectory" - }, - "servicemanagement": { - "component": "servicemanagement" - }, - "serviceusage": { - "component": "serviceusage" - }, - "shell": { - "component": "shell" - }, - "speech": { - "component": "speech" - }, - "storagetransfer": { - "component": "storagetransfer" - }, - "talent": { - "component": "talent" - }, - "texttospeech": { - "component": "texttospeech" - }, - "tpu": { - "component": "tpu" - }, - "trace": { - "component": "trace" - }, - "translate": { - "component": "translate" - }, - "video": { - "component": "video" - }, - "videointelligence": { - "component": "videointelligence" - }, - "vision/v2": { - "component": "vision" - }, - "vmmigration": { - "component": "vmmigration" - }, - "vpcaccess": { - "component": "vpcaccess" - }, - "webrisk": { - "component": "webrisk" - }, - "websecurityscanner": { - "component": "websecurityscanner" - }, - "workflows": { - "component": "workflows" - } - } -} diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json deleted file mode 100644 index 546e7c31a..000000000 --- a/vendor/cloud.google.com/go/release-please-config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "release-type": "go-yoshi", - "separate-pull-requests": true, - "include-component-in-tag": false, - "packages": { - ".": { - "component": "main" - } - } -} diff --git 
a/vendor/cloud.google.com/go/storage/.release-please-manifest.json b/vendor/cloud.google.com/go/storage/.release-please-manifest.json deleted file mode 100644 index 3186e6d25..000000000 --- a/vendor/cloud.google.com/go/storage/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "storage": "1.27.0" -} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 7dbf460aa..f12da250e 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,32 @@ # Changes +## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) + + +### Bug Fixes + +* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) + + +### Features + +* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) +* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) + + +### Bug Fixes + +* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) +* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) + + +### Documentation + +* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) + ## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md index f18c37309..b2f411210 100644 --- a/vendor/cloud.google.com/go/storage/README.md +++ b/vendor/cloud.google.com/go/storage/README.md @@ -25,7 +25,7 @@ if err != nil { log.Fatal(err) } defer rc.Close() -body, err := ioutil.ReadAll(rc) +body, err := io.ReadAll(rc) if err != nil { log.Fatal(err) } diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index ea83e81a1..28a73b8d9 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -170,7 +170,7 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( // for this method. 
// // [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication -// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4] +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { return SignedURL(b.name, object, opts) @@ -212,7 +212,7 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, // to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields // in some cases. Read more on the [automatic detection of credentials] for this method. // -// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4] +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { return GenerateSignedPostPolicyV4(b.name, object, opts) @@ -298,18 +298,18 @@ func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, // circumventing the cost of recreating the auth/transport layer svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc)) if err != nil { - return nil, fmt.Errorf("unable to create iamcredentials client: %v", err) + return nil, fmt.Errorf("unable to create iamcredentials client: %w", err) } resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{ Payload: base64.StdEncoding.EncodeToString(in), }).Do() if err != nil { - return nil, fmt.Errorf("unable to sign bytes: %v", err) + return nil, fmt.Errorf("unable to sign bytes: %w", err) } out, err := base64.StdEncoding.DecodeString(resp.SignedBlob) if err != nil { - return nil, fmt.Errorf("unable to base64 decode response: %v", err) + return nil, fmt.Errorf("unable to base64 decode response: %w", err) } return out, nil } @@ -444,6 +444,11 @@ type BucketAttrs struct { // See https://cloud.google.com/storage/docs/managing-turbo-replication for // more information. RPO RPO + + // Autoclass holds the bucket's autoclass configuration. If enabled, + // allows for the automatic selection of the best storage class + // based on object access patterns. + Autoclass *Autoclass } // BucketPolicyOnly is an alias for UniformBucketLevelAccess. @@ -710,6 +715,20 @@ type CustomPlacementConfig struct { DataLocations []string } +// Autoclass holds the bucket's autoclass configuration. If enabled, +// allows for the automatic selection of the best storage class +// based on object access patterns. See +// https://cloud.google.com/storage/docs/using-autoclass for more information. +type Autoclass struct { + // Enabled specifies whether the autoclass feature is enabled + // on the bucket. + Enabled bool + // ToggleTime is the time from which Autoclass was last toggled. 
+ // If Autoclass is enabled when the bucket is created, the ToggleTime + // is set to the bucket creation time. This field is read-only. + ToggleTime time.Time +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -744,6 +763,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { ProjectNumber: b.ProjectNumber, RPO: toRPO(b), CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), + Autoclass: toAutoclassFromRaw(b.Autoclass), }, nil } @@ -776,6 +796,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { RPO: toRPOFromProto(b), CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based + Autoclass: toAutoclassFromProto(b.GetAutoclass()), } } @@ -830,6 +851,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { IamConfiguration: bktIAM, Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), + Autoclass: b.Autoclass.toRawAutoclass(), } } @@ -889,6 +911,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { IamConfig: bktIAM, Rpo: b.RPO.String(), CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), + Autoclass: b.Autoclass.toProtoAutoclass(), } } @@ -907,23 +930,30 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { if ua.RequesterPays != nil { bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} } + var bktIAM *storagepb.Bucket_IamConfig - var ublaEnabled bool - var bktPolicyOnlyEnabled bool - if ua.UniformBucketLevelAccess != nil { - ublaEnabled = optional.ToBool(ua.UniformBucketLevelAccess.Enabled) - } - if ua.BucketPolicyOnly != nil { - bktPolicyOnlyEnabled = optional.ToBool(ua.BucketPolicyOnly.Enabled) - } - if ublaEnabled || bktPolicyOnlyEnabled { - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: true, + if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + + if ua.BucketPolicyOnly != nil { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled), + } + } + + if ua.UniformBucketLevelAccess != nil { + // UniformBucketLevelAccess takes precedence over BucketPolicyOnly, + // so Enabled will be overridden here if both are set + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled), + } + } + + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } } - if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() - } + var defaultHold bool if ua.DefaultEventBasedHold != nil { defaultHold = optional.ToBool(ua.DefaultEventBasedHold) @@ -964,6 +994,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { Website: ua.Website.toProtoBucketWebsite(), IamConfig: bktIAM, Rpo: ua.RPO.String(), + Autoclass: ua.Autoclass.toProtoAutoclass(), } } @@ -1079,6 +1110,10 @@ type BucketAttrsToUpdate struct { // more information. RPO RPO + // If set, updates the autoclass configuration of the bucket. + // See https://cloud.google.com/storage/docs/using-autoclass for more information.
+ Autoclass *Autoclass + // acl is the list of access control rules on the bucket. // It is unexported and only used internally by the gRPC client. // Library users should use ACLHandle methods directly. @@ -1192,6 +1227,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb.Website = ua.Website.toRawBucketWebsite() } } + if ua.Autoclass != nil { + rb.Autoclass = &raw.BucketAutoclass{ + Enabled: ua.Autoclass.Enabled, + ForceSendFields: []string{"Enabled"}, + } + } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. rb.Acl = nil @@ -1346,8 +1387,14 @@ func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionP if rp == nil { return nil } + // RetentionPeriod must be greater than 0, so if it is 0, the user left it + // unset, and so we should not send it in the request i.e. nil is sent. + var period *int64 + if rp.RetentionPeriod != 0 { + period = proto.Int64(int64(rp.RetentionPeriod / time.Second)) + } return &storagepb.Bucket_RetentionPolicy{ - RetentionPeriod: int64(rp.RetentionPeriod / time.Second), + RetentionPeriod: period, } } @@ -1367,7 +1414,7 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) } func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy { - if rp == nil { + if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 { return nil } return &RetentionPolicy{ @@ -1886,6 +1933,53 @@ func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *Custom return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} } +func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { + if a == nil { + return nil + } + // Excluding read only field ToggleTime. + return &raw.BucketAutoclass{ + Enabled: a.Enabled, + } +} + +func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { + if a == nil { + return nil + } + // Excluding read only field ToggleTime. + return &storagepb.Bucket_Autoclass{ + Enabled: a.Enabled, + } +} + +func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { + if a == nil || a.ToggleTime == "" { + return nil + } + // Return Autoclass.ToggleTime only if parsed with a valid value. + t, err := time.Parse(time.RFC3339, a.ToggleTime) + if err != nil { + return &Autoclass{ + Enabled: a.Enabled, + } + } + return &Autoclass{ + Enabled: a.Enabled, + ToggleTime: t, + } +} + +func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { + if a == nil || a.GetToggleTime().AsTime().Unix() == 0 { + return nil + } + return &Autoclass{ + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + } +} + // Objects returns an iterator over the objects in the bucket that match the // Query q. If q is nil, no filtering is done. Objects will be iterated over // lexicographically by name. 
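The Autoclass plumbing above intentionally round-trips only the `Enabled` flag; `ToggleTime` is read-only and reported back by the server (both `toRawAutoclass` and `toProtoAutoclass` exclude it). A minimal sketch of driving the new field from the public API, assuming nothing beyond what this patch adds to `BucketAttrs`/`BucketAttrsToUpdate`; the bucket name is a placeholder:

```go
// Sketch: enabling Autoclass via the new BucketAttrsToUpdate.Autoclass field.
// "my-bucket" is a placeholder; ToggleTime is server-set and read-only.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	attrs, err := client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{
		Autoclass: &storage.Autoclass{Enabled: true},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("autoclass enabled=%v, last toggled %v",
		attrs.Autoclass.Enabled, attrs.Autoclass.ToggleTime)
}
```

Since only `Enabled` is sent over the wire, reading `ToggleTime` back from the returned attributes is the way to observe when the setting last changed.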
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index 40eb576ce..d579a2b1e 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -317,10 +317,11 @@ type destinationObject struct { } type rewriteObjectRequest struct { - srcObject sourceObject - dstObject destinationObject - predefinedACL string - token string + srcObject sourceObject + dstObject destinationObject + predefinedACL string + token string + maxBytesRewrittenPerCall int64 } type rewriteObjectResponse struct { diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 60ed81391..a0b9a2683 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -69,6 +69,15 @@ type Copier struct { DestinationKMSKeyName string dst, src *ObjectHandle + + // The maximum number of bytes that will be rewritten per rewrite request. + // Most callers shouldn't need to specify this parameter - it is primarily + // in place to support testing. If specified the value must be an integral + // multiple of 1 MiB (1048576). Also, this only applies to requests where + // the source and destination span locations and/or storage classes. Finally, + // this value must not change across rewrite calls else you'll get an error + // that the `rewriteToken` is invalid. + maxBytesRewrittenPerCall int64 } // Run performs the copy. @@ -108,8 +117,9 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { encryptionKey: c.dst.encryptionKey, keyName: c.DestinationKMSKeyName, }, - predefinedACL: c.PredefinedACL, - token: c.RewriteToken, + predefinedACL: c.PredefinedACL, + token: c.RewriteToken, + maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall, } isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) @@ -127,6 +137,7 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { return nil, err } c.RewriteToken = res.token + req.token = res.token if c.ProgressFunc != nil { c.ProgressFunc(uint64(res.written), uint64(res.size)) } diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index 049fbae2f..4a44cee8b 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -25,6 +25,7 @@ import ( "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -246,7 +247,8 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) 
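// The "*" FieldMask below requests all bucket fields; cf. the equivalent
// GetObject change later in this file, which notes it matches ProjectionFull.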
req := &storagepb.GetBucketRequest{ - Name: bucketResourceName(globalProjectAlias, bucket), + Name: bucketResourceName(globalProjectAlias, bucket), + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, } if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil { return nil, err @@ -344,6 +346,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat if uattrs.RPO != RPOUnknown { fieldMask.Paths = append(fieldMask.Paths, "rpo") } + if uattrs.Autoclass != nil { + fieldMask.Paths = append(fieldMask.Paths, "autoclass") + } // TODO(cathyo): Handle labels. Pending b/230510191. req.UpdateMask = fieldMask @@ -380,14 +385,14 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q it.query = *q } req := &storagepb.ListObjectsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - Prefix: it.query.Prefix, - Delimiter: it.query.Delimiter, - Versions: it.query.Versions, - LexicographicStart: it.query.StartOffset, - LexicographicEnd: it.query.EndOffset, - // TODO(noahietz): Convert a projection to a FieldMask. - // ReadMask: q.Projection, + Parent: bucketResourceName(globalProjectAlias, bucket), + Prefix: it.query.Prefix, + Delimiter: it.query.Delimiter, + Versions: it.query.Versions, + LexicographicStart: it.query.StartOffset, + LexicographicEnd: it.query.EndOffset, + IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) @@ -411,6 +416,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q it.items = append(it.items, b) } + // Response is always non-nil after a successful request. + res := gitr.Response.(*storagepb.ListObjectsResponse) + for _, prefix := range res.GetPrefixes() { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return token, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo( @@ -449,6 +460,8 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string req := &storagepb.GetObjectRequest{ Bucket: bucketResourceName(globalProjectAlias, bucket), Object: object, + // ProjectionFull by default. + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, } if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { return nil, err @@ -492,10 +505,7 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) } - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } + fieldMask := &fieldmaskpb.FieldMask{Paths: nil} if uattrs.EventBasedHold != nil { fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") } @@ -522,7 +532,7 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str } // Note: This API currently does not support entites using project ID. // Use project numbers in ACL entities. Pending b/233617896. - if uattrs.ACL != nil { + if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { fieldMask.Paths = append(fieldMask.Paths, "acl") } // TODO(cathyo): Handle metadata. Pending b/230510191. @@ -782,14 +792,15 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec s := callSettings(c.settings, opts...) 
obj := req.dstObject.attrs.toProtoObject("") call := &storagepb.RewriteObjectRequest{ - SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), - SourceObject: req.srcObject.name, - RewriteToken: req.token, - DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), - DestinationName: req.dstObject.name, - Destination: obj, - DestinationKmsKey: req.dstObject.keyName, - DestinationPredefinedAcl: req.predefinedACL, + SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), + SourceObject: req.srcObject.name, + RewriteToken: req.token, + DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), + DestinationName: req.dstObject.name, + Destination: obj, + DestinationKmsKey: req.dstObject.keyName, + DestinationPredefinedAcl: req.predefinedACL, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), } // The userProject, whether source or destination project, is decided by the code calling the interface. @@ -812,6 +823,9 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() } + + call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall + var res *storagepb.RewriteResponse var err error @@ -850,10 +864,10 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } b := bucketResourceName(globalProjectAlias, params.bucket) - // TODO(noahdietz): Use encryptionKey to set relevant request fields. req := &storagepb.ReadObjectRequest{ - Bucket: b, - Object: params.object, + Bucket: b, + Object: params.object, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), } // The default is a negative value, which means latest. if params.gen >= 0 { @@ -943,6 +957,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // Store the content from the first Recv in the // client buffer for reading later. leftovers: msg.GetChecksummedData().GetContent(), + settings: s, }, } @@ -964,6 +979,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + var offset int64 errorf := params.setError progress := params.progress @@ -971,6 +988,10 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage pr, pw := io.Pipe() gw := newGRPCWriter(c, params, pr) + gw.settings = s + if s.userProject != "" { + gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) + } // This function reads the data sent to the pipe and sends sets of messages // on the gRPC client-stream as the buffer is filled. @@ -988,8 +1009,6 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage return } - // TODO(noahdietz): Send encryption key via CommonObjectRequestParams. - // The chunk buffer is full, but there is no end in sight. This // means that a resumable upload will need to be used to send // multiple chunks, until we are done reading data. Start a @@ -1315,6 +1334,7 @@ type gRPCReader struct { reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte cancel context.CancelFunc + settings *settings } // Read reads bytes into the user's buffer from an open gRPC stream. 
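The reader and writer hunks in this region stop hard-coding the package-level `ShouldRetry` and instead consult the per-operation predicate carried in `settings.retry.shouldRetry`. A minimal sketch of how a caller would supply that predicate through the public `Retryer` API; the bucket and object names are placeholders, and the predicate simply reuses the exported default:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Per-handle retry policy; the predicate is what the gRPC reader and
	// writer pick up via settings.retry.shouldRetry in the hunks above.
	obj := client.Bucket("my-bucket").Object("my-object").Retryer( // placeholder names
		storage.WithPolicy(storage.RetryAlways),
		// Any func(error) bool may be supplied; reusing the exported
		// default predicate keeps this sketch minimal.
		storage.WithErrorFunc(storage.ShouldRetry),
	)

	r, err := obj.NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```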
@@ -1390,7 +1410,11 @@ func (r *gRPCReader) Close() error { // an attempt to reopen the stream. func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { msg, err := r.stream.Recv() - if err != nil && ShouldRetry(err) { + var shouldRetry = ShouldRetry + if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { + shouldRetry = r.settings.retry.shouldRetry + } + if err != nil && shouldRetry(err) { // This will "close" the existing stream and immediately attempt to // reopen the stream, but will backoff if further attempts are necessary. // Reopening the stream Recvs the first message, so if retrying is @@ -1454,6 +1478,7 @@ type gRPCWriter struct { attrs *ObjectAttrs conds *Conditions encryptionKey []byte + settings *settings sendCRC32C bool @@ -1471,21 +1496,30 @@ func (w *gRPCWriter) startResumableUpload() error { if err != nil { return err } - upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{ - WriteObjectSpec: spec, - }) - - w.upid = upres.GetUploadId() - return err + return run(w.ctx, func() error { + upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{ + WriteObjectSpec: spec, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), + }) + w.upid = upres.GetUploadId() + return err + }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) } // queryProgress is a helper that queries the status of the resumable upload // associated with the given upload ID. func (w *gRPCWriter) queryProgress() (int64, error) { - q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid}) + var persistedSize int64 + err := run(w.ctx, func() error { + q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ + UploadId: w.upid, + }) + persistedSize = q.GetPersistedSize() + return err + }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) // q.GetCommittedSize() will return 0 if q is nil. - return q.GetPersistedSize(), err + return persistedSize, err } // uploadBuffer opens a Write stream and uploads the buffer at the given offset (if @@ -1500,6 +1534,10 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st var err error var finishWrite bool var sent, limit int = 0, maxPerMessageWriteSize + var shouldRetry = ShouldRetry + if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { + shouldRetry = w.settings.retry.shouldRetry + } offset := start toWrite := w.buf[:recvd] for { @@ -1546,6 +1584,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ WriteObjectSpec: spec, } + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) } // TODO: Currently the checksums are only sent on the first message @@ -1553,8 +1592,16 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // on the *last* message of the stream (instead of the first). 
if w.sendCRC32C { req.ObjectChecksums = &storagepb.ObjectChecksums{ - Crc32C: proto.Uint32(w.attrs.CRC32C), - Md5Hash: w.attrs.MD5, + Crc32C: proto.Uint32(w.attrs.CRC32C), + } + } + if len(w.attrs.MD5) != 0 { + if cs := req.GetObjectChecksums(); cs == nil { + req.ObjectChecksums = &storagepb.ObjectChecksums{ + Md5Hash: w.attrs.MD5, + } + } else { + cs.Md5Hash = w.attrs.MD5 } } } @@ -1570,7 +1617,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received // from closing the stream. - if ShouldRetry(err) { + if shouldRetry(err) { sent = 0 finishWrite = false // TODO: Add test case for failure modes of querying progress. @@ -1601,7 +1648,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received // from closing the stream. - if ShouldRetry(err) { + if shouldRetry(err) { sent = 0 finishWrite = false offset, err = w.determineOffset(start) @@ -1673,7 +1720,12 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { // read copies the data in the reader to the given buffer and reports how much // data was read into the buffer and if there is no more data to read (EOF). +// Furthermore, if the attrs.ContentType is unset, the first bytes of content +// will be sniffed for a matching content type. func (w *gRPCWriter) read() (int, bool, error) { + if w.attrs.ContentType == "" { + w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) + } // Set n to -1 to start the Read loop. var n, recvd int = -1, 0 var err error diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index d21fba141..422a7c233 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -147,11 +147,11 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro } createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) if err != nil { - return nil, fmt.Errorf("field CreatedTime: %v", err) + return nil, fmt.Errorf("field CreatedTime: %w", err) } updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) if err != nil && !updatedTimeCanBeNil { - return nil, fmt.Errorf("field UpdatedTime: %v", err) + return nil, fmt.Errorf("field UpdatedTime: %w", err) } hmKey := &HMACKey{ diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index a589d3d0f..fae96043a 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -114,17 +114,17 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, s.clientOption...) if err != nil { - return nil, fmt.Errorf("dialing: %v", err) + return nil, fmt.Errorf("dialing: %w", err) } // RawService should be created with the chosen endpoint to take account of user override. rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) if err != nil { - return nil, fmt.Errorf("storage client: %v", err) + return nil, fmt.Errorf("storage client: %w", err) } // Update readHost and scheme with the chosen endpoint. 
u, err := url.Parse(ep) if err != nil { - return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err) + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) } return &httpStorageClient{ @@ -344,8 +344,8 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.EndOffset(it.query.EndOffset) req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) - if len(it.query.fieldSelection) > 0 { - req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) + if selection := it.query.toFieldSelection(); selection != "" { + req.Fields("nextPageToken", googleapi.Field(selection)) } req.PageToken(pageToken) if s.userProject != "" { @@ -747,6 +747,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { return nil, err } + + if req.maxBytesRewrittenPerCall != 0 { + call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall) + } + var res *raw.RewriteResponse var err error setClientHeader(call.Header()) @@ -905,7 +910,7 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange if dashIndex >= 0 { startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err) + return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) } } } else { @@ -1033,9 +1038,8 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage // there is no need to add retries here. // Retry only when the operation is idempotent or the retry policy is RetryAlways. - isIdempotent := params.conds != nil && (params.conds.GenerationMatch >= 0 || params.conds.DoesNotExist == true) var useRetry bool - if (s.retry == nil || s.retry.policy == RetryIdempotent) && isIdempotent { + if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent { useRetry = true } else if s.retry != nil && s.retry.policy == RetryAlways { useRetry = true diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index cf7e0e785..6e904ee5c 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -589,7 +589,18 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ } func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...)
var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -604,7 +615,18 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck } func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) it := &BucketIterator{} req = proto.Clone(req).(*storagepb.ListBucketsRequest) @@ -1215,7 +1237,18 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW } func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) var resp *storagepb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1230,7 +1263,18 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma } func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
var resp *storagepb.CreateHmacKeyResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1245,7 +1289,18 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma } func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -1256,7 +1311,18 @@ func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHma } func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) var resp *storagepb.HmacKeyMetadata err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1271,7 +1337,18 @@ func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRe } func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) 
it := &HmacKeyMetadataIterator{} req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) @@ -1314,7 +1391,18 @@ func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKe } func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) var resp *storagepb.HmacKeyMetadata err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go index 28c00714b..c36634b1a 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/storage/v2/storage.proto package storage @@ -25,17 +25,17 @@ import ( reflect "reflect" sync "sync" - empty "github.com/golang/protobuf/ptypes/empty" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" date "google.golang.org/genproto/googleapis/type/date" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -264,7 +264,7 @@ type GetBucketRequest struct { // Mask specifying which fields to read. // A "*" field may be used to indicate all fields. // If no mask is specified, will default to all fields. - ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *GetBucketRequest) Reset() { @@ -320,7 +320,7 @@ func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { return 0 } -func (x *GetBucketRequest) GetReadMask() *field_mask.FieldMask { +func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -335,7 +335,7 @@ type CreateBucketRequest struct { // Required. 
The project to which this bucket will belong. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the new bucket being inserted. + // Properties of the new bucket being inserted. // The project and name of the bucket are specified in the parent and // bucket_id fields, respectively. Populating those fields in `bucket` will // result in an error. @@ -443,7 +443,7 @@ type ListBucketsRequest struct { // If no mask is specified, will default to all fields except items.owner, // items.acl, and items.default_object_acl. // * may be used to mean "all fields". - ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *ListBucketsRequest) Reset() { @@ -506,7 +506,7 @@ func (x *ListBucketsRequest) GetPrefix() string { return "" } -func (x *ListBucketsRequest) GetReadMask() *field_mask.FieldMask { +func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -580,7 +580,7 @@ type LockBucketRetentionPolicyRequest struct { // Required. Name of a bucket. Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Makes the operation conditional on whether bucket's current metageneration + // Required. Makes the operation conditional on whether bucket's current metageneration // matches the given value. Must be positive. IfMetagenerationMatch int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3" json:"if_metageneration_match,omitempty"` } @@ -637,7 +637,7 @@ type UpdateBucketRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The bucket to update. + // Required. The bucket to update. // The bucket's `name` field will be used to identify the bucket. Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` // If set, will only modify the bucket if its metageneration matches this @@ -654,7 +654,7 @@ type UpdateBucketRequest struct { // Valid values are "authenticatedRead", "bucketOwnerFullControl", // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". PredefinedDefaultObjectAcl string `protobuf:"bytes,9,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` - // List of fields to be updated. + // Required. List of fields to be updated. // // To specify ALL fields, equivalent to the JSON API's "update" function, // specify a single field with the value `*`. Note: not recommended. If a new @@ -664,7 +664,7 @@ type UpdateBucketRequest struct { // Not specifying any fields is an error. // Not specifying a field while setting that field to a non-default value is // an error. 
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateBucketRequest) Reset() { @@ -734,7 +734,7 @@ func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { return "" } -func (x *UpdateBucketRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -1361,9 +1361,9 @@ type ReadObjectRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The name of the bucket containing the object to read. + // Required. The name of the bucket containing the object to read. Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // The name of the object to read. + // Required. The name of the object to read. Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` // If present, selects a specific revision of this object (as opposed // to the latest version, the default). @@ -1408,7 +1408,7 @@ type ReadObjectRequest struct { // If no mask is specified, will default to all fields except metadata.owner // and metadata.acl. // * may be used to mean "all fields". - ReadMask *field_mask.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *ReadObjectRequest) Reset() { @@ -1513,7 +1513,7 @@ func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestP return nil } -func (x *ReadObjectRequest) GetReadMask() *field_mask.FieldMask { +func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -1554,7 +1554,7 @@ type GetObjectRequest struct { // If no mask is specified, will default to all fields except metadata.acl and // metadata.owner. // * may be used to mean "all fields". - ReadMask *field_mask.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *GetObjectRequest) Reset() { @@ -1645,7 +1645,7 @@ func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestPa return nil } -func (x *GetObjectRequest) GetReadMask() *field_mask.FieldMask { +func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -1742,7 +1742,7 @@ type WriteObjectSpec struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Destination object, including its name and its metadata. + // Required. Destination object, including its name and its metadata. Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // Apply a predefined set of access controls to this object. // Valid values are "authenticatedRead", "bucketOwnerFullControl", @@ -2158,7 +2158,7 @@ type ListObjectsRequest struct { // If no mask is specified, will default to all fields except items.acl and // items.owner. // * may be used to mean "all fields". 
- ReadMask *field_mask.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` // Filter results to objects whose names are lexicographically equal to or // after lexicographic_start. If lexicographic_end is also set, the objects // listed have names between lexicographic_start (inclusive) and @@ -2252,7 +2252,7 @@ func (x *ListObjectsRequest) GetVersions() bool { return false } -func (x *ListObjectsRequest) GetReadMask() *field_mask.FieldMask { +func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -2435,7 +2435,7 @@ type RewriteObjectRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Immutable. The name of the destination object. + // Required. Immutable. The name of the destination object. // See the // [Naming Guidelines](https://cloud.google.com/storage/docs/naming-objects). // Example: `test.txt` @@ -2443,7 +2443,7 @@ type RewriteObjectRequest struct { // object. A Cloud Storage object is uniquely identified by the tuple of // (bucket, object, generation). DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` - // Immutable. The name of the bucket containing the destination object. + // Required. Immutable. The name of the bucket containing the destination object. DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` // The name of the Cloud KMS key that will be used to encrypt the destination // object. The Cloud KMS key must be located in same location as the object. @@ -2809,7 +2809,7 @@ type StartResumableWriteRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The destination bucket, object, and metadata, as well as any preconditions. + // Required. The destination bucket, object, and metadata, as well as any preconditions. WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` // A set of parameters common to Storage API requests concerning an object. CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` @@ -2917,7 +2917,7 @@ type UpdateObjectRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The object to update. + // Required. The object to update. // The object's bucket and name fields are used to identify the object to // update. If present, the object's generation field selects a specific // revision of this object whose metadata should be updated. Otherwise, @@ -2942,7 +2942,7 @@ type UpdateObjectRequest struct { // Valid values are "authenticatedRead", "bucketOwnerFullControl", // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // List of fields to be updated. + // Required. List of fields to be updated. // // To specify ALL fields, equivalent to the JSON API's "update" function, // specify a single field with the value `*`. Note: not recommended. 
If a new @@ -2952,7 +2952,7 @@ type UpdateObjectRequest struct { // Not specifying any fields is an error. // Not specifying a field while setting that field to a non-default value is // an error. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // A set of parameters common to Storage API requests concerning an object. CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` } @@ -3031,7 +3031,7 @@ func (x *UpdateObjectRequest) GetPredefinedAcl() string { return "" } -func (x *UpdateObjectRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -3344,13 +3344,13 @@ type ListHmacKeysRequest struct { // "projects/<projectIdentifier>". // <projectIdentifier> can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Optional. The maximum number of keys to return. + // The maximum number of keys to return. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. A previously returned token from ListHmacKeysResponse to get the next page. + // A previously returned token from ListHmacKeysResponse to get the next page. PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // Optional. If set, filters to only return HMAC keys for specified service account. + // If set, filters to only return HMAC keys for specified service account. ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // Optional. If set, return deleted keys that have not yet been wiped out. + // If set, return deleted keys that have not yet been wiped out. ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` } @@ -3497,7 +3497,7 @@ type UpdateHmacKeyRequest struct { // Update mask for hmac_key. // Not specifying any fields will mean only the `state` field is updated to // the value specified in `hmac_key`. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateHmacKeyRequest) Reset() { @@ -3539,7 +3539,7 @@ func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { return nil } -func (x *UpdateHmacKeyRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -3716,14 +3716,14 @@ type Bucket struct { // Output only. The creation time of the bucket. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation].
- CreateTime *timestamp.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] // (CORS) config. Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` // Output only. The modification time of the bucket. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamp.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The default value for event-based hold on newly created objects in this // bucket. Event-based hold is a way to retain objects indefinitely until an // event occurs, signified by the @@ -3770,8 +3770,8 @@ type Bucket struct { IamConfig *Bucket_IamConfig `protobuf:"bytes,23,opt,name=iam_config,json=iamConfig,proto3" json:"iam_config,omitempty"` // Reserved for future use. SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` - // Configuration that, if present, specifies the data placement for a Custom - // Dual Region. + // Configuration that, if present, specifies the data placement for a + // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. @@ -3894,7 +3894,7 @@ func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { return nil } -func (x *Bucket) GetCreateTime() *timestamp.Timestamp { +func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } @@ -3908,7 +3908,7 @@ func (x *Bucket) GetCors() []*Bucket_Cors { return nil } -func (x *Bucket) GetUpdateTime() *timestamp.Timestamp { +func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -4022,7 +4022,8 @@ type BucketAccessControl struct { // * `group-{groupid}` // * `group-{email}` // * `domain-{domain}` - // * `project-{team-projectid}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` // * `allUsers` // * `allAuthenticatedUsers` // Examples: @@ -4031,7 +4032,12 @@ type BucketAccessControl struct { // `group-example@googlegroups.com` // * All members of the Google Apps for Business domain `example.com` would be // `domain-example.com` + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project entities, + // `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` // The ID for the entity, if any. EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` // The etag of the BucketAccessControl. 
@@ -4100,6 +4106,13 @@ func (x *BucketAccessControl) GetEntity() string { return "" } +func (x *BucketAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + func (x *BucketAccessControl) GetEntityId() string { if x != nil { return x.EntityId @@ -4283,9 +4296,9 @@ type HmacKeyMetadata struct { // Writable, can be updated by UpdateHmacKey operation. State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` // Output only. The creation time of the HMAC key. - CreateTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. The last modification time of the HMAC key metadata. - UpdateTime *timestamp.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The etag of the HMAC key. Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -4357,14 +4370,14 @@ func (x *HmacKeyMetadata) GetState() string { return "" } -func (x *HmacKeyMetadata) GetCreateTime() *timestamp.Timestamp { +func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } return nil } -func (x *HmacKeyMetadata) GetUpdateTime() *timestamp.Timestamp { +func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -4396,13 +4409,13 @@ type Notification struct { // If included in the metadata of GetNotificationRequest, the operation will // only be performed if the etag matches that of the Notification. Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // Optional. If present, only send notifications about listed event types. If empty, + // If present, only send notifications about listed event types. If empty, // sent notifications for all event types. EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` - // Optional. An optional list of additional attributes to attach to each Pub/Sub + // A list of additional attributes to attach to each Pub/Sub // message published for this notification subscription. CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Optional. If present, only apply this notification config to object names that + // If present, only apply this notification config to object names that // begin with this prefix. ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` // Required. The desired content of the Payload. @@ -4611,7 +4624,7 @@ type Object struct { // version of the object has been deleted. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. 
- DeleteTime *timestamp.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. // If an object is stored without a Content-Type, it is served as @@ -4620,7 +4633,7 @@ type Object struct { // Output only. The creation time of the object. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - CreateTime *timestamp.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Number of underlying components that make up this object. Components are // accumulated by compose operations. // Attempting to set or update this field will result in a @@ -4636,7 +4649,7 @@ type Object struct { // Object Lifecycle Configuration. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamp.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // Cloud KMS Key used to encrypt this object, if the object is encrypted by // such a key. KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` @@ -4644,7 +4657,7 @@ type Object struct { // object is initially created, it will be set to time_created. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateStorageClassTime *timestamp.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` + UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` // Whether an object is under temporary hold. While this flag is set to true, // the object is protected against deletion and overwrites. A common use case // of this flag is regulatory investigations where objects need to be retained @@ -4658,7 +4671,7 @@ type Object struct { // Note 2: This value can be provided even when temporary hold is set (so that // the user can reason about policy without having to first unset the // temporary hold). - RetentionExpireTime *timestamp.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` + RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` // User-provided metadata, in key/value pairs. Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Whether an object is under event-based hold. @@ -4681,7 +4694,7 @@ type Object struct { // such a key. 
CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` // A user-specified timestamp set on an object. - CustomTime *timestamp.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` + CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` } func (x *Object) Reset() { @@ -4800,7 +4813,7 @@ func (x *Object) GetContentLanguage() string { return "" } -func (x *Object) GetDeleteTime() *timestamp.Timestamp { +func (x *Object) GetDeleteTime() *timestamppb.Timestamp { if x != nil { return x.DeleteTime } @@ -4814,7 +4827,7 @@ func (x *Object) GetContentType() string { return "" } -func (x *Object) GetCreateTime() *timestamp.Timestamp { +func (x *Object) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } @@ -4835,7 +4848,7 @@ func (x *Object) GetChecksums() *ObjectChecksums { return nil } -func (x *Object) GetUpdateTime() *timestamp.Timestamp { +func (x *Object) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -4849,7 +4862,7 @@ func (x *Object) GetKmsKey() string { return "" } -func (x *Object) GetUpdateStorageClassTime() *timestamp.Timestamp { +func (x *Object) GetUpdateStorageClassTime() *timestamppb.Timestamp { if x != nil { return x.UpdateStorageClassTime } @@ -4863,7 +4876,7 @@ func (x *Object) GetTemporaryHold() bool { return false } -func (x *Object) GetRetentionExpireTime() *timestamp.Timestamp { +func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { if x != nil { return x.RetentionExpireTime } @@ -4898,7 +4911,7 @@ func (x *Object) GetCustomerEncryption() *CustomerEncryption { return nil } -func (x *Object) GetCustomTime() *timestamp.Timestamp { +func (x *Object) GetCustomTime() *timestamppb.Timestamp { if x != nil { return x.CustomTime } @@ -4921,7 +4934,8 @@ type ObjectAccessControl struct { // * `group-{groupid}` // * `group-{email}` // * `domain-{domain}` - // * `project-{team-projectid}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` // * `allUsers` // * `allAuthenticatedUsers` // Examples: @@ -4930,7 +4944,12 @@ type ObjectAccessControl struct { // `group-example@googlegroups.com`. // * All members of the Google Apps for Business domain `example.com` would be // `domain-example.com`. + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project entities, + // `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` // The ID for the entity, if any. EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` // The etag of the ObjectAccessControl. @@ -4999,6 +5018,13 @@ func (x *ObjectAccessControl) GetEntity() string { return "" } +func (x *ObjectAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + func (x *ObjectAccessControl) GetEntityId() string { if x != nil { return x.EntityId @@ -5819,14 +5845,14 @@ type Bucket_RetentionPolicy struct { // Server-determined value that indicates the time from which policy was // enforced and effective. 
- EffectiveTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` // Once locked, an object retention policy cannot be modified. IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` // The duration in seconds that objects need to be retained. Retention // duration must be greater than zero and less than 100 years. Note that // enforcement of retention periods less than a day is not guaranteed. Such // periods should only be used for testing purposes. - RetentionPeriod int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3" json:"retention_period,omitempty"` + RetentionPeriod *int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3,oneof" json:"retention_period,omitempty"` } func (x *Bucket_RetentionPolicy) Reset() { @@ -5861,7 +5887,7 @@ func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} } -func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamp.Timestamp { +func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { if x != nil { return x.EffectiveTime } @@ -5876,8 +5902,8 @@ func (x *Bucket_RetentionPolicy) GetIsLocked() bool { } func (x *Bucket_RetentionPolicy) GetRetentionPeriod() int64 { - if x != nil { - return x.RetentionPeriod + if x != nil && x.RetentionPeriod != nil { + return *x.RetentionPeriod } return 0 } @@ -6063,7 +6089,7 @@ type Bucket_Autoclass struct { // disabled/unconfigured or set to false after being enabled. If Autoclass // is enabled when the bucket is created, the toggle_time is set to the // bucket creation time. - ToggleTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` } func (x *Bucket_Autoclass) Reset() { @@ -6105,7 +6131,7 @@ func (x *Bucket_Autoclass) GetEnabled() bool { return false } -func (x *Bucket_Autoclass) GetToggleTime() *timestamp.Timestamp { +func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { if x != nil { return x.ToggleTime } @@ -6124,7 +6150,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { // The deadline time for changing // `iamConfig.uniformBucketLevelAccess.enabled` from `true` to `false`. // Mutable until the specified deadline is reached, but not afterward. 
- LockTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` + LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` } func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { @@ -6166,7 +6192,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { return false } -func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamp.Timestamp { +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { if x != nil { return x.LockTime } @@ -6516,256 +6542,298 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x22, 0xa6, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x22, 0xa1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, - 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, - 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, - 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xac, 0x03, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, + 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 
0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, + 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, - 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x6c, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, - 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 
0x63, - 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, + 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, - 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf4, 0x06, 0x0a, 0x14, - 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, - 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 
0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, - 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, + 
0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf4, 0x06, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, + 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, + 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, + 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0xa8, 0x02, + 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 
0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, - 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, + 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, + 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, + 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 
0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, + 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, - 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, @@ -6776,356 +6844,236 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, - 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 
0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, - 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, + 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 
0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, + 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 
0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, + 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 
0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x35, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, - 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, - 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, - 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xc9, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, - 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, - 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, - 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, - 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, - 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 
0x63, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, - 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, - 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xbe, - 0x0d, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, - 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, - 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, - 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, - 0x73, 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, - 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, - 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 
0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, - 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, - 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, - 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, - 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, - 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, - 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 
0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, + 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xc9, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 
0x6b, + 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, + 0x2f, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, + 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x2b, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x78, + 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xc4, 0x0d, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, + 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, + 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, - 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 
0x52, 0x19, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x49, 0x64, 0x22, 0xfd, 0x04, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, - 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 
0x63, + 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, + 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, + 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x1c, 0x63, 
0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, + 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, @@ -7137,916 +7085,1013 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, - 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, - 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, - 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, - 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 
0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, - 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, + 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 
0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, + 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 
0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, + 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 
0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x22, 0x94, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, + 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, + 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2f, 0x0a, 0x11, 0x73, 0x68, 0x6f, - 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, - 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, - 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, - 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, - 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, - 0x53, 
0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, - 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, - 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, - 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, - 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, - 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, - 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, - 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, - 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, - 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, - 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, - 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, - 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, - 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, - 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, - 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, - 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, - 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, - 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, - 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, - 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, - 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, - 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, - 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, - 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, - 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, - 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, - 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, - 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, - 0x59, 0x5f, 0x56, 0x41, 0x4c, 
0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, - 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, - 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, - 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, - 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, - 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, - 0x1a, 0x02, 0x10, 0x01, 0x22, 0x8c, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, - 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, - 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, - 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, - 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, + 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, + 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, + 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, + 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 
0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, + 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, + 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, + 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, + 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, + 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, + 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, + 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, + 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, + 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, + 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, + 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, + 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, + 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, + 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, + 0x58, 
0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, + 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, + 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, + 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, + 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, + 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, + 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, + 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, + 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, + 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, + 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xa6, + 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, + 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x79, 0x70, 
0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, + 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, - 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, - 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, - 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, + 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, + 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 
0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, - 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, - 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, - 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, - 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, - 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, - 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, - 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, - 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 
0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, - 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, + 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, + 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, + 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, + 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x42, 
0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, + 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, - 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, - 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, - 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, - 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, - 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, - 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, - 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, - 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, - 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, - 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 
0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, - 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, - 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, - 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, + 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, + 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 
0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, + 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, + 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 
0x63, 0x6b, - 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, + 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, + 0x18, 0x03, 
0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, + 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, + 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, + 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, + 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 
0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, + 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xb6, + 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, - 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, - 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, - 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, - 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, - 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, - 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, - 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, - 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, - 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x08, 0x48, 0x01, 0x52, 0x06, - 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, - 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, - 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, - 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, - 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, - 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, - 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, - 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, - 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, - 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, - 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, - 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, - 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, - 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, - 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, - 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 
0x6f, 0x67, - 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, - 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x1a, 0x9c, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, - 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, - 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, - 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, - 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, - 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, - 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, + 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x88, + 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, + 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, + 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, + 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, + 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, + 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, + 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 
0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, + 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, + 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, + 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, + 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, + 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 
0x52, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xec, 0x03, + 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 
0x0a, - 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x7d, 0x22, 0xf3, 0x01, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, - 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, - 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, - 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, - 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, - 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, - 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, - 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x33, 0xe0, 
0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, - 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, - 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x70, 0xea, 0x41, 0x6d, 0x0a, + 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x71, 0x0a, 0x12, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, + 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 
0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, + 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 
0x74, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, + 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, + 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, + 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, + 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, + 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 
0x03, + 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, + 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xfb, 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x24, - 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x31, 0x0a, - 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x3a, 0x70, 
0xea, 0x41, 0x6d, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, - 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, - 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, - 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, - 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, - 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, - 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, - 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, - 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, 0x97, + 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, - 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 
0x0b, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, + 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, + 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x97, 0x26, 0x0a, 0x07, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 
0x0a, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, - 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, - 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, - 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, - 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, - 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x16, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, - 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, - 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, - 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, - 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, - 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, 0xf3, 0x01, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, - 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 
0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, - 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, - 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, - 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, - 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xda, 0x24, 0x0a, - 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, - 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, - 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x38, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, + 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, + 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6d, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x1a, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 
0x6b, - 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x67, 0x0a, 0x0b, - 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x09, 0xda, 0x41, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, - 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, - 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, + 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 
0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, + 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, + 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, - 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, - 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, + 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x14, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, - 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x96, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, - 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 
0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, + 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, + 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x4e, 0x6f, 0x74, 
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x96, 0x01, 0x0a, 0x11, 0x4c, + 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, + 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xba, + 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 
0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, + 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, + 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, + 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, + 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, + 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 
0x76, 0x32, - 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, - 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0xc7, 0x01, 0x0a, 0x0b, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x61, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 
0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, - 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, - 0xc7, 0x01, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, - 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x2a, 0x2a, 0x7d, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x2a, 0x2a, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, - 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, - 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, - 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x6f, 0x0a, - 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x0a, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x84, - 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x20, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x14, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x6c, 0x0a, - 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, + 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x14, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x6b, 0x0a, 0x0c, 0x4c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, + 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, + 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0a, 0xda, 0x41, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x75, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 
0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x17, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, - 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, - 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, + 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, + 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, - 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, - 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, - 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, - 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, - 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, + 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 
0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, + 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, + 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -8139,13 +8184,13 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition nil, // 73: google.storage.v2.Notification.CustomAttributesEntry nil, // 74: google.storage.v2.Object.MetadataEntry - (*field_mask.FieldMask)(nil), // 75: google.protobuf.FieldMask - (*timestamp.Timestamp)(nil), // 76: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp (*date.Date)(nil), // 77: google.type.Date (*v1.GetIamPolicyRequest)(nil), // 78: google.iam.v1.GetIamPolicyRequest (*v1.SetIamPolicyRequest)(nil), // 79: google.iam.v1.SetIamPolicyRequest (*v1.TestIamPermissionsRequest)(nil), // 80: google.iam.v1.TestIamPermissionsRequest - (*empty.Empty)(nil), // 81: google.protobuf.Empty + (*emptypb.Empty)(nil), // 81: google.protobuf.Empty (*v1.Policy)(nil), // 82: google.iam.v1.Policy (*v1.TestIamPermissionsResponse)(nil), // 83: google.iam.v1.TestIamPermissionsResponse } @@ -9191,6 +9236,7 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[62].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ @@ -9226,7 +9272,7 @@ const _ = grpc.SupportPackageIsVersion6 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type StorageClient interface { // Permanently deletes an empty bucket. - DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Returns metadata for the specified bucket. GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) // Creates a new bucket. @@ -9245,7 +9291,7 @@ type StorageClient interface { // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) // Permanently deletes a notification subscription. 
- DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // View a notification config. GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) // Creates a notification subscription for a given bucket. @@ -9260,7 +9306,7 @@ type StorageClient interface { ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the `generation` parameter is used. - DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Cancels an in-progress resumable upload. CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. @@ -9351,7 +9397,7 @@ type StorageClient interface { // Creates a new HMAC key for the given service account. CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Gets an existing HMAC key metadata for the given id. GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) // Lists HMAC keys under a given project with the additional filters provided. @@ -9368,8 +9414,8 @@ func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { return &storageClient{cc} } -func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) if err != nil { return nil, err @@ -9449,8 +9495,8 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques return out, nil } -func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) 
if err != nil { return nil, err @@ -9494,8 +9540,8 @@ func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequ return out, nil } -func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) if err != nil { return nil, err @@ -9650,8 +9696,8 @@ func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequ return out, nil } -func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) if err != nil { return nil, err @@ -9689,7 +9735,7 @@ func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequ // StorageServer is the server API for Storage service. type StorageServer interface { // Permanently deletes an empty bucket. - DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) + DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) // Returns metadata for the specified bucket. GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) // Creates a new bucket. @@ -9708,7 +9754,7 @@ type StorageServer interface { // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) // Permanently deletes a notification subscription. - DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) + DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) // View a notification config. GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) // Creates a notification subscription for a given bucket. @@ -9723,7 +9769,7 @@ type StorageServer interface { ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the `generation` parameter is used. - DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) + DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) // Cancels an in-progress resumable upload. CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. @@ -9814,7 +9860,7 @@ type StorageServer interface { // Creates a new HMAC key for the given service account. CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) + DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) // Gets an existing HMAC key metadata for the given id. GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) // Lists HMAC keys under a given project with the additional filters provided. 
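Aside from the regenerated descriptor bytes, the bulk of the churn in this generated file is the rename from the deprecated `github.com/golang/protobuf/ptypes/*` alias packages (`empty.Empty`, `field_mask.FieldMask`, `timestamp.Timestamp`) to the canonical well-known-type packages under `google.golang.org/protobuf/types/known/*` (`emptypb`, `fieldmaskpb`, `timestamppb`). A minimal sketch, not part of the patch, of what the rename means at a call site; the wire format is unchanged, only the Go import paths differ:

```go
package main

import (
	"fmt"

	// Before: deprecated alias packages such as
	//   "github.com/golang/protobuf/ptypes/empty"     (empty.Empty)
	//   "github.com/golang/protobuf/ptypes/timestamp" (timestamp.Timestamp)
	// After: the canonical well-known-types packages.
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Identical messages on the wire; only the providing package changes,
	// so RPCs like DeleteObject now return *emptypb.Empty.
	e := &emptypb.Empty{}
	ts := timestamppb.Now()
	fmt.Println(e.String(), ts.AsTime())
}
```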
@@ -9827,7 +9873,7 @@ type StorageServer interface { type UnimplementedStorageServer struct { } -func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") } func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { @@ -9854,7 +9900,7 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestI func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") } func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { @@ -9869,7 +9915,7 @@ func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotif func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") } -func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") } func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { @@ -9905,7 +9951,7 @@ func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServic func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") } -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") } func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index dc1cb9f60..008568b40 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.27.0" +const Version = "1.28.1" diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index 0057f1087..2961aca20 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -340,7 +340,7 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "expiration": opts.Expires.Format(time.RFC3339), }) if err != nil { - return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err) + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err) } b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) diff --git a/vendor/cloud.google.com/go/storage/release-please-config.json b/vendor/cloud.google.com/go/storage/release-please-config.json deleted file mode 100644 index 5b7b812a9..000000000 --- a/vendor/cloud.google.com/go/storage/release-please-config.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "release-type": "go-yoshi", - "separate-pull-requests": true, - "include-component-in-tag": true, - "tag-separator": "/", - "packages": { - "storage": { - "component": "storage" - } - }, - "plugins": ["sentence-case"] -} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index d7e06fca7..b5c10efc8 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -52,6 +52,7 @@ import ( htransport "google.golang.org/api/transport/http" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/fieldmaskpb" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -187,22 +188,22 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { - return nil, fmt.Errorf("dialing: %v", err) + return nil, fmt.Errorf("dialing: %w", err) } // RawService should be created with the chosen endpoint to take account of user override. rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) if err != nil { - return nil, fmt.Errorf("storage client: %v", err) + return nil, fmt.Errorf("storage client: %w", err) } // Update readHost and scheme with the chosen endpoint. u, err := url.Parse(ep) if err != nil { - return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err) + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) } tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) if err != nil { - return nil, fmt.Errorf("storage: %v", err) + return nil, fmt.Errorf("storage: %w", err) } return &Client{ @@ -1089,11 +1090,6 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { // toProtoObject copies the editable attributes from o to the proto library's Object type. func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { - checksums := &storagepb.ObjectChecksums{Md5Hash: o.MD5} - if o.CRC32C > 0 { - checksums.Crc32C = proto.Uint32(o.CRC32C) - } - // For now, there are only globally unique buckets, and "_" is the alias // project ID for such buckets. If the bucket is not provided, like in the // destination ObjectAttrs of a Copy, do not attempt to format it. 
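The `%v` → `%w` switches in post_policy_v4.go and the NewClient error paths opt these errors into Go 1.13 error wrapping, so callers can recover the underlying cause with the `errors` package. A small self-contained illustration of the difference (not from the patch):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("connection refused")

	flat := fmt.Errorf("dialing: %v", base)    // old style: interpolates text only
	wrapped := fmt.Errorf("dialing: %w", base) // new style: records the wrapped error

	fmt.Println(errors.Is(flat, base))    // false: the chain is lost
	fmt.Println(errors.Is(wrapped, base)) // true: errors.Is can unwrap %w
}
```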
@@ -1122,7 +1118,6 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { KmsKey: o.KMSKeyName, Generation: o.Generation, Size: o.Size, - Checksums: checksums, } } @@ -1417,12 +1412,13 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { Generation: o.Generation, Metageneration: o.Metageneration, StorageClass: o.StorageClass, - CustomerKeySHA256: string(o.GetCustomerEncryption().GetKeySha256Bytes()), - KMSKeyName: o.GetKmsKey(), - Created: convertProtoTime(o.GetCreateTime()), - Deleted: convertProtoTime(o.GetDeleteTime()), - Updated: convertProtoTime(o.GetUpdateTime()), - CustomTime: convertProtoTime(o.GetCustomTime()), + // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. + CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), + KMSKeyName: o.GetKmsKey(), + Created: convertProtoTime(o.GetCreateTime()), + Deleted: convertProtoTime(o.GetDeleteTime()), + Updated: convertProtoTime(o.GetUpdateTime()), + CustomTime: convertProtoTime(o.GetCustomTime()), } } @@ -1489,10 +1485,11 @@ type Query struct { // object will be included in the results. Versions bool - // fieldSelection is used to select only specific fields to be returned by - // the query. It's used internally and is populated for the user by - // calling Query.SetAttrSelection - fieldSelection string + // attrSelection is used to select only specific fields to be returned by + // the query. It is set by the user calling SetAttrSelection. These + // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON + // clients respectively. + attrSelection []string // StartOffset is used to filter results to objects whose names are // lexicographically equal to or after startOffset. If endOffset is also set, @@ -1552,6 +1549,39 @@ var attrToFieldMap = map[string]string{ "CustomTime": "customTime", } +// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the protobuf Object message. +var attrToProtoFieldMap = map[string]string{ + "Name": "name", + "Bucket": "bucket", + "Etag": "etag", + "Generation": "generation", + "Metageneration": "metageneration", + "StorageClass": "storage_class", + "Size": "size", + "ContentEncoding": "content_encoding", + "ContentDisposition": "content_disposition", + "CacheControl": "cache_control", + "ACL": "acl", + "ContentLanguage": "content_language", + "Deleted": "delete_time", + "ContentType": "content_type", + "Created": "create_time", + "CRC32C": "checksums.crc32c", + "MD5": "checksums.md5_hash", + "Updated": "update_time", + "KMSKeyName": "kms_key", + "TemporaryHold": "temporary_hold", + "RetentionExpirationTime": "retention_expire_time", + "Metadata": "metadata", + "EventBasedHold": "event_based_hold", + "Owner": "owner", + "CustomerKeySHA256": "customer_encryption", + "CustomTime": "custom_time", + // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. + // "MediaLink": "mediaLink", +} + // SetAttrSelection makes the query populate only specific attributes of // objects. When iterating over objects, if you only need each object's name // and size, pass []string{"Name", "Size"} to this method. Only these fields @@ -1560,16 +1590,42 @@ var attrToFieldMap = map[string]string{ // optimization; for more information, see // https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance func (q *Query) SetAttrSelection(attrs []string) error { + // Validate selections.
+ for _, attr := range attrs { + // If the attr is acceptable for one of the two sets, then it is OK. + // If it is not acceptable for either, then return an error. + // The respective masking implementations ignore unknown attrs which + // makes switching between transports a little easier. + _, okJSON := attrToFieldMap[attr] + _, okGRPC := attrToProtoFieldMap[attr] + + if !okJSON && !okGRPC { + return fmt.Errorf("storage: attr %v is not valid", attr) + } + } + + q.attrSelection = attrs + + return nil +} + +func (q *Query) toFieldSelection() string { + if q == nil || len(q.attrSelection) == 0 { + return "" + } fieldSet := make(map[string]bool) - for _, attr := range attrs { + for _, attr := range q.attrSelection { field, ok := attrToFieldMap[attr] if !ok { - return fmt.Errorf("storage: attr %v is not valid", attr) + // Future proofing, skip unknown fields, let SetAttrSelection handle + // error modes. + continue } fieldSet[field] = true } + var s string if len(fieldSet) > 0 { var b bytes.Buffer b.WriteString("prefixes,items(") @@ -1582,9 +1638,50 @@ func (q *Query) SetAttrSelection(attrs []string) error { b.WriteString(field) } b.WriteString(")") - q.fieldSelection = b.String() + s = b.String() } - return nil + return s +} + +func (q *Query) toFieldMask() *fieldmaskpb.FieldMask { + // The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull). + if q == nil { + return &fieldmaskpb.FieldMask{Paths: []string{"*"}} + } + + // User selected attributes via q.SetAttrSelection. This takes precedence + // over the Projection. + if numSelected := len(q.attrSelection); numSelected > 0 { + protoFieldPaths := make([]string, 0, numSelected) + + for _, attr := range q.attrSelection { + pf, ok := attrToProtoFieldMap[attr] + if !ok { + // Future proofing, skip unknown fields, let SetAttrSelection + // handle error modes. + continue + } + protoFieldPaths = append(protoFieldPaths, pf) + } + + return &fieldmaskpb.FieldMask{Paths: protoFieldPaths} + } + + // ProjectionDefault == ProjectionFull which means all fields. + fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}} + if q.Projection == ProjectionNoACL { + paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields + for _, f := range attrToProtoFieldMap { + // Skip the acl and owner fields for "NoACL". + if f == "acl" || f == "owner" { + continue + } + paths = append(paths, f) + } + fm.Paths = paths + } + + return fm } // Conditions constrain methods to act on specific generations of diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 91229f148..3a6a1ce0f 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -176,7 +176,6 @@ func (w *Writer) openWriter() (err error) { isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) - go w.monitorCancel() params := &openWriterParams{ ctx: w.ctx, chunkSize: w.ChunkSize, @@ -191,11 +190,15 @@ func (w *Writer) openWriter() (err error) { progress: w.progress, setObj: func(o *ObjectAttrs) { w.obj = o }, } + if err := w.ctx.Err(); err != nil { + return err // short-circuit + } w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
if err != nil { return err } w.opened = true + go w.monitorCancel() return nil } diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md deleted file mode 100644 index bcca0604d..000000000 --- a/vendor/cloud.google.com/go/testing.md +++ /dev/null @@ -1,236 +0,0 @@ -# Testing Code that depends on Go Client Libraries - -The Go client libraries generated as a part of `cloud.google.com/go` all take -the approach of returning concrete types instead of interfaces. That way, new -fields and methods can be added to the libraries without breaking users. This -document will go over some patterns that can be used to test code that depends -on the Go client libraries. - -## Testing gRPC services using fakes - -*Note*: You can see the full -[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake). - -The clients found in `cloud.google.com/go` are gRPC based, with a couple of -notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage) -and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients. -Interactions with gRPC services can be faked by serving up your own in-memory -server within your test. One benefit of using this approach is that you don’t -need to define an interface in your runtime code; you can keep using -concrete struct types. You instead define a fake server in your test code. For -example, take a look at the following function: - -```go -import ( - "context" - "fmt" - "log" - "os" - - translate "cloud.google.com/go/translate/apiv3" - "github.com/googleapis/gax-go/v2" - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" -) - -func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) { - ctx := context.Background() - log.Printf("Translating %q to %q", text, targetLang) - req := &translatepb.TranslateTextRequest{ - Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")), - TargetLanguageCode: "en-US", - Contents: []string{text}, - } - resp, err := client.TranslateText(ctx, req) - if err != nil { - return "", fmt.Errorf("unable to translate text: %v", err) - } - translations := resp.GetTranslations() - if len(translations) != 1 { - return "", fmt.Errorf("expected only one result, got %d", len(translations)) - } - return translations[0].TranslatedText, nil -} -``` - -Here is an example of what a fake server implementation would look like for -faking the interactions above: - -```go -import ( - "context" - - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" -) - -type fakeTranslationServer struct { - translatepb.UnimplementedTranslationServiceServer -} - -func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) { - resp := &translatepb.TranslateTextResponse{ - Translations: []*translatepb.Translation{ - &translatepb.Translation{ - TranslatedText: "Hello World", - }, - }, - } - return resp, nil -} -``` - -All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto) -contains a similar `package.UnimplmentedFooServer` type that is useful for -creating fakes. By embedding the unimplemented server in the -`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server -exposes. 
Then, by providing our own `fakeTranslationServer.TranslateText` -method you can “override” the default unimplemented behavior of the one RPC that -you would like to be faked. - -The test itself does require a little bit of setup: start up a `net.Listener`, -register the server, and tell the client library to call the server: - -```go -import ( - "context" - "net" - "testing" - - translate "cloud.google.com/go/translate/apiv3" - "google.golang.org/api/option" - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" - "google.golang.org/grpc" -) - -func TestTranslateTextWithConcreteClient(t *testing.T) { - ctx := context.Background() - - // Setup the fake server. - fakeTranslationServer := &fakeTranslationServer{} - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - gsrv := grpc.NewServer() - translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer) - fakeServerAddr := l.Addr().String() - go func() { - if err := gsrv.Serve(l); err != nil { - panic(err) - } - }() - - // Create a client. - client, err := translate.NewTranslationClient(ctx, - option.WithEndpoint(fakeServerAddr), - option.WithoutAuthentication(), - option.WithGRPCDialOption(grpc.WithInsecure()), - ) - if err != nil { - t.Fatal(err) - } - - // Run the test. - text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US") - if err != nil { - t.Fatal(err) - } - if text != "Hello World" { - t.Fatalf("got %q, want Hello World", text) - } -} -``` - -## Testing using mocks - -*Note*: You can see the full -[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock). - -When mocking code you need to work with interfaces. Let’s create an interface -for the `cloud.google.com/go/translate/apiv3` client used in the -`TranslateTextWithConcreteClient` function mentioned in the previous section. -The `translate.Client` has over a dozen methods but this code only uses one of -them. Here is an interface that satisfies the interactions of the -`translate.Client` in this function. - -```go -type TranslationClient interface { - TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) -} -``` - -Now that we have an interface that satisfies the method being used we can -rewrite the function signature to take the interface instead of the concrete -type. - -```go -func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) { -// ... -} -``` - -This allows a real `translate.Client` to be passed to the method in production -and for a mock implementation to be passed in during testing. This pattern can -be applied to any Go code, not just `cloud.google.com/go`. This is because -interfaces in Go are implicitly satisfied. Structs in the client libraries can -implicitly implement interfaces defined in your codebase. Let’s take a look at -what it might look like to define a lightweight mock for the `TranslationClient` -interface. 
- -```go -import ( - "context" - "testing" - - "github.com/googleapis/gax-go/v2" - translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" -) - -type mockClient struct{} - -func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) { - resp := &translatepb.TranslateTextResponse{ - Translations: []*translatepb.Translation{ - &translatepb.Translation{ - TranslatedText: "Hello World", - }, - }, - } - return resp, nil -} - -func TestTranslateTextWithAbstractClient(t *testing.T) { - client := &mockClient{} - text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US") - if err != nil { - t.Fatal(err) - } - if text != "Hello World" { - t.Fatalf("got %q, want Hello World", text) - } -} -``` - -If you prefer to not write your own mocks there are mocking frameworks such as -[golang/mock](https://github.com/golang/mock) which can generate mocks for you -from an interface. As a word of caution though, try to not -[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html). - -## Testing using emulators - -Some of the client libraries provided in `cloud.google.com/go` support running -against a service emulator. The concept is similar to that of using fakes, -mentioned above, but the server is managed for you. You just need to start it up -and instruct the client library to talk to the emulator by setting a service -specific emulator environment variable. Current services/environment-variables -are: - -- bigtable: `BIGTABLE_EMULATOR_HOST` -- datastore: `DATASTORE_EMULATOR_HOST` -- firestore: `FIRESTORE_EMULATOR_HOST` -- pubsub: `PUBSUB_EMULATOR_HOST` -- spanner: `SPANNER_EMULATOR_HOST` -- storage: `STORAGE_EMULATOR_HOST` - - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud. - -For more information on emulators please refer to the -[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index e930a1660..80321d29a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,47 @@ # Release History +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. 
+* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + ## 1.1.0 (2022-06-03) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go index 7119699f9..28c64678c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -10,11 +10,11 @@ Package azcore implements an HTTP request/response middleware pipeline used by A The middleware consists of three components. - - One or more Policy instances. - - A Transporter instance. - - A Pipeline instance that combines the Policy and Transporter instances. + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. -Implementing the Policy Interface +# Implementing the Policy Interface A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share @@ -34,53 +34,53 @@ and error instances to its caller. Template for implementing a stateless Policy: - type policyFunc func(*policy.Request) (*http.Response, error) - // Do implements the Policy interface on policyFunc. + type policyFunc func(*policy.Request) (*http.Response, error) - func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { - return pf(req) - } + // Do implements the Policy interface on policyFunc. 
+ func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } - func NewMyStatelessPolicy() policy.Policy { - return policyFunc(func(req *policy.Request) (*http.Response, error) { - // TODO: mutate/process Request here + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here - // forward Request to next Policy & get Response/error - resp, err := req.Next() + // forward Request to next Policy & get Response/error + resp, err := req.Next() - // TODO: mutate/process Response/error here + // TODO: mutate/process Response/error here - // return Response/error to previous Policy - return resp, err - }) - } + // return Response/error to previous Policy + return resp, err + }) + } Template for implementing a stateful Policy: - type MyStatefulPolicy struct { - // TODO: add configuration/setting fields here - } + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } - // TODO: add initialization args to NewMyStatefulPolicy() - func NewMyStatefulPolicy() policy.Policy { - return &MyStatefulPolicy{ - // TODO: initialize configuration/setting fields here - } - } + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } - func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { - // TODO: mutate/process Request here + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here - // forward Request to next Policy & get Response/error - resp, err := req.Next() + // forward Request to next Policy & get Response/error + resp, err := req.Next() - // TODO: mutate/process Response/error here + // TODO: mutate/process Response/error here - // return Response/error to previous Policy - return resp, err - } + // return Response/error to previous Policy + return resp, err + } -Implementing the Transporter Interface +# Implementing the Transporter Interface The Transporter interface is responsible for sending the HTTP request and returning the corresponding HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter @@ -88,66 +88,66 @@ implementation uses a shared http.Client from the standard library. The same stateful/stateless rules for Policy implementations apply to Transporter implementations. -Using Policy and Transporter Instances Via a Pipeline +# Using Policy and Transporter Instances Via a Pipeline To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. - func NewPipeline(transport Transporter, policies ...Policy) Pipeline + func NewPipeline(transport Transporter, policies ...Policy) Pipeline The specified Policy instances form a chain and are invoked in the order provided to NewPipeline followed by the Transporter. Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. 
- func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) - func (p Pipeline) Do(req *Request) (*http.Request, error) + func (p Pipeline) Do(req *Request) (*http.Request, error) The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter instances. The response/error is then sent through the same chain of Policy instances in reverse order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with TransportA. - pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) The flow of Request and Response looks like the following: - policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ - | - HTTP(S) endpoint - | - caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ -Creating a Request Instance +# Creating a Request Instance The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also contains some internal state and provides various convenience methods. You create a Request instance by calling the runtime.NewRequest function: - func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) If the Request should contain a body, call the SetBody method. - func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error A seekable stream is required so that upon retry, the retry Policy instance can seek the stream back to the beginning before retrying the network request and re-uploading the body. -Sending an Explicit Null +# Sending an Explicit Null Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. - { - "delete-me": null - } + { - "delete-me": null + } This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as a means to resolve the ambiguity between a field to be excluded and its zero-value. - type Widget struct { - Name *string `json:",omitempty"` - Count *int `json:",omitempty"` - } + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } In the above example, Name and Count are defined as pointer-to-type to disambiguate between a missing value (nil) and a zero-value (0) which might have semantic differences. @@ -157,18 +157,18 @@ a Widget's count, one simply specifies the new value for Count, leaving Name nil To fulfill the requirement for sending a JSON null, the NullValue() function can be used. - w := Widget{ - Count: azcore.NullValue[*int](), - } + w := Widget{ + Count: azcore.NullValue[*int](), + } This sends an explicit "null" for Count, indicating that any current value for Count should be deleted. -Processing the Response +# Processing the Response When the HTTP response is received, the *http.Response is returned directly. Each Policy instance can inspect/mutate the *http.Response. -Built-in Logging +# Built-in Logging To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
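Besides the AZURE_SDK_GO_LOGGING environment variable, log output can also be routed programmatically through azcore's log listener. A brief sketch, assuming the `sdk/azcore/log` package's `SetListener` and `SetEvents` API (which this diff does not itself touch):

```go
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// Route pipeline log messages to stdout; without a listener (and without
	// AZURE_SDK_GO_LOGGING set) nothing is emitted.
	azlog.SetListener(func(event azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", event, msg)
	})
	// Optionally restrict output to HTTP request/response events.
	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
}
```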
@@ -178,40 +178,40 @@ own synchronization to handle concurrent invocations. See the docs for the log package for further details. -Pageable Operations +# Pageable Operations Pageable operations return potentially large data sets spread over multiple GET requests. The result of each GET is a "page" of data consisting of a slice of items. Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. - func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. - pager := widgetClient.NewListWidgetsPager(nil) - for pager.More() { - page, err := pager.NextPage(context.TODO()) - // handle err - for _, widget := range page.Values { - // process widget - } - } + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } -Long-Running Operations +# Long-Running Operations Long-running operations (LROs) are operations consisting of an initial request to start the operation followed by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one of the following values. - * Succeeded - the LRO completed successfully - * Failed - the LRO failed to complete - * Canceled - the LRO was canceled + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. - func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. It does _not_ mean that the widget has been created or updated (or failed to be created/updated). @@ -219,11 +219,11 @@ It does _not_ mean that the widget has been created or updated (or failed to be The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, call the PollUntilDone() method. - poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) - // handle err - result, err := poller.PollUntilDone(context.TODO(), nil) - // handle err - // use result + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the context is canceled/timed out. @@ -232,22 +232,22 @@ Note that LROs can take anywhere from several seconds to several minutes. The d this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation mechanism as required. -Resume Tokens +# Resume Tokens Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to recreate the poller. 
This is achieved via the runtime.Poller[T].ResumeToken() method. - token, err := poller.ResumeToken() - // handle error + token, err := poller.ResumeToken() + // handle error Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls to poller.Poll() might change the poller's state. In this case, a new token should be created. After the token has been obtained, it can be used to recreate an instance of the originating poller. - poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ - ResumeToken: token, - }) + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index 8fca32a7d..6e029d493 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -8,7 +8,6 @@ package exported import ( "io" - "io/ioutil" "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -51,7 +50,7 @@ func Payload(resp *http.Response) ([]byte, error) { if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok { return buf.Bytes(), nil } - bytesBody, err := ioutil.ReadAll(resp.Body) + bytesBody, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index 0194b8b01..d34f161c7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -71,6 +71,13 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi if !pollers.IsValidURL(asyncURL) { return nil, fmt.Errorf("invalid polling URL %s", asyncURL) } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := pollers.GetProvisioningState(resp) + if state == "" { + state = pollers.StatusInProgress + } p := &Poller[T]{ pl: pl, resp: resp, @@ -79,7 +86,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, - CurState: pollers.StatusInProgress, + CurState: state, } return p, nil } @@ -92,6 +99,10 @@ func (p *Poller[T]) Done() bool { // Poll retrieves the current state of the LRO. 
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } state, err := pollers.GetStatus(resp) if err != nil { return "", err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 99e9f2f8d..7efdd8a0d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -100,6 +100,10 @@ func (p *Poller[T]) Done() bool { func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } if resp.StatusCode == http.StatusNoContent { p.resp = resp p.CurState = pollers.StatusSucceeded diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index 56c2b9029..276685da4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -64,12 +64,19 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { if !pollers.IsValidURL(locURL) { return nil, fmt.Errorf("invalid polling URL %s", locURL) } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := pollers.GetProvisioningState(resp) + if state == "" { + state = pollers.StatusInProgress + } return &Poller[T]{ pl: pl, resp: resp, Type: kind, PollURL: locURL, - CurState: pollers.StatusInProgress, + CurState: state, }, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index dd714e768..c3c648266 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -91,6 +91,10 @@ func (p *Poller[T]) Done() bool { func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } state, err := pollers.GetStatus(resp) if err != nil { return "", err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 4dd39e68c..75d241c5b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -30,5 +30,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. 
- Version = "v1.1.0" + Version = "v1.2.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index bfc71e9a0..27c302298 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // Policy represents an extensibility point for the Pipeline that can mutate the specified @@ -27,6 +28,9 @@ type Request = exported.Request // ClientOptions contains optional settings for a client's pipeline. // All zero-value fields will be initialized with default values. type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. Cloud cloud.Configuration @@ -39,6 +43,10 @@ type ClientOptions struct { // Telemetry configures the built-in telemetry policy. Telemetry TelemetryOptions + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + // Transport sets the transport for HTTP requests. Transport Transporter @@ -69,7 +77,8 @@ type LogOptions struct { } // RetryOptions configures the retry policy's behavior. -// Call NewRetryOptions() to create an instance with default values. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. type RetryOptions struct { // MaxRetries specifies the maximum number of attempts a failed operation will be retried // before producing an error. @@ -82,6 +91,7 @@ type RetryOptions struct { TryTimeout time.Duration // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. // The default value is four seconds. A value less than zero means no delay between retries. RetryDelay time.Duration @@ -92,8 +102,15 @@ type RetryOptions struct { MaxRetryDelay time.Duration // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. - // The default value is the status codes in StatusCodesForRetry. - // Specifying an empty slice will cause retries to happen only for transport errors. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. 
StatusCodes []int } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go index ad75ae2ab..a2906f51b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -16,6 +16,7 @@ import ( // PipelineOptions contains Pipeline options for SDK developers type PipelineOptions struct { AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions PerCall, PerRetry []policy.Policy } @@ -32,13 +33,13 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy cp = *options } if len(plOpts.AllowedHeaders) > 0 { - headers := make([]string, 0, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) copy(headers, plOpts.AllowedHeaders) headers = append(headers, cp.Logging.AllowedHeaders...) cp.Logging.AllowedHeaders = headers } if len(plOpts.AllowedQueryParameters) > 0 { - qp := make([]string, 0, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) copy(qp, plOpts.AllowedQueryParameters) qp = append(qp, cp.Logging.AllowedQueryParams...) cp.Logging.AllowedQueryParams = qp @@ -46,6 +47,9 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy // we put the includeResponsePolicy at the very beginning so that the raw response // is populated with the final response (some policies might mutate the response) policies := []policy.Policy{policyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } if !cp.Telemetry.Disabled { policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 000000000..e5309aa6c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. 
+func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. +func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go index faf175e3f..30a02a7a4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -9,7 +9,7 @@ package runtime import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "sort" "strings" @@ -210,7 +210,7 @@ func writeReqBody(req *policy.Request, b *bytes.Buffer) error { if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { return nil } - body, err := ioutil.ReadAll(req.Raw().Body) + body, err := io.ReadAll(req.Raw().Body) if err != nil { fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) return err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index 9d630e471..b33002018 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -31,18 +31,21 @@ func setDefaults(o *policy.RetryOptions) { } else if o.MaxRetries < 0 { o.MaxRetries = 0 } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds if o.MaxRetryDelay == 0 { - o.MaxRetryDelay = 120 * time.Second + o.MaxRetryDelay = 60 * time.Second } else if o.MaxRetryDelay < 0 { // not really an unlimited cap, but sufficiently large enough to be considered as such o.MaxRetryDelay = math.MaxInt64 } if o.RetryDelay == 0 { - o.RetryDelay = 4 * time.Second + o.RetryDelay = 800 * time.Millisecond } else if o.RetryDelay < 0 { o.RetryDelay = 0 } if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go o.StatusCodes = []int{ http.StatusRequestTimeout, // 408 http.StatusTooManyRequests, // 429 @@ -106,7 +109,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { try := int32(1) for { resp = 
nil // reset - log.Writef(log.EventRetryPolicy, "\n=====> Try=%d %s %s", try, req.Raw().Method, req.Raw().URL.String()) + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because // the stream may not be at offset 0 when we first get it and we want the same behavior for the @@ -145,6 +148,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { if err == nil && !HasStatusCode(resp, options.StatusCodes...) { // if there is no error and the response code isn't in the list of retry codes then we're done. + log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") return } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil { // don't retry if the parent context has been cancelled or its deadline exceeded @@ -167,14 +171,19 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { return } - // drain before retrying so nothing is leaked - Drain(resp) - // use the delay from retry-after if available delay := shared.RetryAfter(resp) if delay <= 0 { delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return } + + // drain before retrying so nothing is leaked + Drain(resp) + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) select { case <-time.After(delay): diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 21e5c578d..98e007184 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,8 @@ import ( "fmt" "io" "mime/multipart" + "os" + "path" "reflect" "strings" "time" @@ -37,6 +39,7 @@ const ( ) // NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { return exported.NewRequest(ctx, httpMethod, endpoint) } @@ -55,19 +58,23 @@ func JoinPaths(root string, paths ...string) string { root, qps = splitPath[0], splitPath[1] } - for i := 0; i < len(paths); i++ { - root = strings.TrimRight(root, "/") - paths[i] = strings.TrimLeft(paths[i], "/") - root += "/" + paths[i] + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" } if qps != "" { - if !strings.HasSuffix(root, "/") { - root += "/" - } - return root + "?" + qps + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p } - return root + return root + p } // EncodeByteArray will base-64 encode the byte slice v. @@ -88,7 +95,9 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
func MarshalAsJSON(req *policy.Request, v interface{}) error { - v = cloneWithoutReadOnlyFields(v) + if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { + v = cloneWithoutReadOnlyFields(v) + } b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -113,16 +122,30 @@ func MarshalAsXML(req *policy.Request, v interface{}) error { func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { body := bytes.Buffer{} writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + for k, v := range formData { if rsc, ok := v.(io.ReadSeekCloser); ok { - // this is the body to upload, the key is its file name - fd, err := writer.CreateFormFile(k, k) - if err != nil { + if err := writeContent(k, k, rsc); err != nil { return err } - // copy the data to the form file - if _, err = io.Copy(fd, rsc); err != nil { - return err + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } } continue } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index 2322f0a20..f86ec0b95 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -13,7 +13,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -86,7 +85,7 @@ func UnmarshalAsXML(resp *http.Response, v interface{}) error { // Drain reads the response body to completion then closes it. The bytes read are discarded. func Drain(resp *http.Response) { if resp != nil && resp.Body != nil { - _, _ = io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 000000000..80282d4ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. + SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. 
+ SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. + SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 000000000..75f757ced --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified name and version. +// - name - the name of the tracer object, typically the fully qualified name of the service client +// - version - the version of the module in which the service client resides +func (p Provider) NewTracer(name, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(name, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // for future expansion +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + return Tracer{ + newSpanFn: newSpanFn, + } +} + +// Tracer is the factory that creates Span instances. +type Tracer struct { + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + return t.newSpanFn(ctx, spanName, options) + } + return ctx, Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. 
+ Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. + Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // AddError contains the implementation for the Span.AddError method. + AddError func(err error) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. +func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// AddError adds the specified error event to the span. +func (s Span) AddError(err error) { + if s.impl.AddError != nil { + s.impl.AddError(err) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). 
+ Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 670839fd4..5877e476f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,11 +1,41 @@ # Release History +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + ## 1.1.0 (2022-06-07) ### Features Added * `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a region name. + ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) ## 1.0.1 (2022-06-07) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 68b35a545..2df42c813 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -133,8 +133,9 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- -|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.2.0-beta.2#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion |[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret ### Authenticating Users @@ -168,7 +169,8 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |-|- |`AZURE_CLIENT_ID`|ID of an Azure Active Directory application |`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant -|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key (without password protection) +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any #### Username and password diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 1e28d181f..affa91d08 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -88,6 +88,7 @@ azlog.SetEvents(azidentity.EventAuthentication) |---|---|---| |Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + ## Troubleshoot ClientSecretCredential authentication issues | Error Code | Issue | Mitigation | @@ -96,6 +97,7 @@ azlog.SetEvents(azidentity.EventAuthentication) |AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| |AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | @@ -103,12 +105,14 @@ azlog.SetEvents(azidentity.EventAuthentication) |AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| |AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + ## Troubleshoot UsernamePasswordCredential authentication issues | Error Code | Issue | Mitigation | |---|---|---| |AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.| + ## Troubleshoot ManagedIdentityCredential authentication issues `ManagedIdentityCredential` is designed to work on a variety of Azure hosts that support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`. @@ -164,6 +168,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio |---|---|---| |"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready.
To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + ## Troubleshoot AzureCliCredential authentication issues | Error Message |Description| Mitigation | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 0faee55ef..60c3b9a1e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -11,7 +11,6 @@ import ( "context" "errors" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -26,9 +25,15 @@ import ( ) const ( - azureAuthorityHost = "AZURE_AUTHORITY_HOST" - azureClientID = "AZURE_CLIENT_ID" - azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" @@ -36,6 +41,37 @@ const ( tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" ) +func getConfidentialClient(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidential.Client, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + o := []confidential.Option{ + confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + o = append(o, additionalOpts...) + return confidential.New(clientID, cred, o...) +} + +func getPublicClient(clientID, tenantID string, co *azcore.ClientOptions) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + return public.New(clientID, + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + ) +} + // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user // 2. 
value of AZURE_AUTHORITY_HOST @@ -94,7 +130,7 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { if rsc, ok := r.Body.(io.ReadSeekCloser); ok { body = rsc } else { - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 000000000..ffcf2094b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,74 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. ClientCertificateCredential has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions) + if err != nil { + return nil, err + } + return &ClientAssertionCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameAssertion + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameAssertion, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index e50157b10..a61d824ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -9,17 +9,12 @@ package azidentity import ( "context" "crypto" - "crypto/rsa" - "crypto/sha1" "crypto/x509" - "encoding/base64" "encoding/pem" "errors" - "os" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "golang.org/x/crypto/pkcs12" ) @@ -46,37 +41,18 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x if len(certs) == 0 { return nil, errors.New("at least one certificate is required") } - pk, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("'key' must be an *rsa.PrivateKey") - } - if !validTenantID(tenantID) { - return nil, errors.New(tenantIDValidationErr) - } if options == nil { options = &ClientCertificateCredentialOptions{} } - authorityHost, err := setAuthorityHost(options.Cloud) - if err != nil { - return nil, err - } - cert, err := newCertContents(certs, pk, options.SendCertificateChain) + cred, err := confidential.NewCredFromCertChain(certs, key) if err != nil { return nil, err } - cred := confidential.NewCredFromCert(cert.c, key) // TODO: NewCredFromCert should take a slice - if err != nil { - return nil, err - } - o := []confidential.Option{ - confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), - confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), - } + var o []confidential.Option if options.SendCertificateChain { o = append(o, confidential.WithX5C()) } - c, err := confidential.New(clientID, cred, o...) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) 
if err != nil { return nil, err } @@ -156,36 +132,6 @@ func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, c return certs, pk, nil } -type certContents struct { - c *x509.Certificate // the signing cert - fp []byte // the signing cert's fingerprint, a SHA-1 digest - pk *rsa.PrivateKey // the signing key - x5c []string // concatenation of every provided cert, base64 encoded -} - -func newCertContents(certs []*x509.Certificate, key *rsa.PrivateKey, sendCertificateChain bool) (*certContents, error) { - cc := certContents{pk: key} - // need the the signing cert's fingerprint: identify that cert by matching its public key to the private key - for _, cert := range certs { - certKey, ok := cert.PublicKey.(*rsa.PublicKey) - if ok && key.E == certKey.E && key.N.Cmp(certKey.N) == 0 { - fp := sha1.Sum(cert.Raw) - cc.fp = fp[:] - cc.c = cert - if sendCertificateChain { - // signing cert must be first in x5c - cc.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cc.x5c...) - } - } else if sendCertificateChain { - cc.x5c = append(cc.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) - } - } - if len(cc.fp) == 0 || cc.c == nil { - return nil, errors.New("found no certificate matching 'key'") - } - return &cc, nil -} - func loadPEMCert(certData []byte) ([]*pem.Block, error) { blocks := []*pem.Block{} for { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index 6ecb8f4db..1c3a51660 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -9,11 +9,9 @@ package azidentity import ( "context" "errors" - "os" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -31,25 +29,14 @@ type ClientSecretCredential struct { // NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. 
func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { - if !validTenantID(tenantID) { - return nil, errors.New(tenantIDValidationErr) - } if options == nil { options = &ClientSecretCredentialOptions{} } - authorityHost, err := setAuthorityHost(options.Cloud) - if err != nil { - return nil, err - } cred, err := confidential.NewCredFromSecret(clientSecret) if err != nil { return nil, err } - c, err := confidential.New(clientID, cred, - confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), - confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), - ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 7358558ac..c2b801c4a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -31,9 +31,11 @@ type DefaultAzureCredentialOptions struct { // DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. // It combines credentials suitable for deployment with credentials suitable for local development. // It attempts to authenticate with each of these credential types, in the following order, stopping when one provides a token: -// EnvironmentCredential -// ManagedIdentityCredential -// AzureCLICredential +// +// EnvironmentCredential +// ManagedIdentityCredential +// AzureCLICredential +// // Consult the documentation for these credential types for more information on how they authenticate. // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. 
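As a minimal usage sketch (the direct GetToken call and the ARM management scope are illustrative only; applications normally pass the credential to a service client, which requests tokens automatically):

	import (
		"context"

		"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
		"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	)

	func authenticate(ctx context.Context) error {
		// tries EnvironmentCredential, then ManagedIdentityCredential, then AzureCLICredential
		cred, err := azidentity.NewDefaultAzureCredential(nil)
		if err != nil {
			return err
		}
		// service clients call GetToken for you; it's invoked directly here only to show
		// that the resolved credential can produce a token
		_, err = cred.GetToken(ctx, policy.TokenRequestOptions{
			Scopes: []string{"https://management.azure.com/.default"},
		})
		return err
	}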
@@ -65,7 +67,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default msiCred, err := NewManagedIdentityCredential(o) if err == nil { creds = append(creds, msiCred) - msiCred.client.imdsTimeout = time.Second + msiCred.mic.imdsTimeout = time.Second } else { errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index d0c72c348..2e9b5438d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -79,17 +78,7 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp = *options } cp.init() - if !validTenantID(cp.TenantID) { - return nil, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(cp.Cloud) - if err != nil { - return nil, err - } - c, err := public.New(cp.ClientID, - public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)), - public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)), - ) + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 16c595d1d..b1871b4d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -28,7 +28,7 @@ type EnvironmentCredentialOptions struct { // EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending // on environment variable configuration. It reads configuration from these variables, in the following order: // -// Service principal with client secret +// # Service principal with client secret // // AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. // @@ -36,15 +36,17 @@ type EnvironmentCredentialOptions struct { // // AZURE_CLIENT_SECRET: one of the service principal's client secrets // -// Service principal with certificate +// # Service principal with certificate // // AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. // // AZURE_CLIENT_ID: the service principal's client ID // -// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the unencrypted private key. +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. // -// User with username and password +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password // // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". 
// @@ -62,7 +64,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme if options == nil { options = &EnvironmentCredentialOptions{} } - tenantID := os.Getenv("AZURE_TENANT_ID") + tenantID := os.Getenv(azureTenantID) if tenantID == "" { return nil, errors.New("missing environment variable AZURE_TENANT_ID") } @@ -70,7 +72,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme if clientID == "" { return nil, errors.New("missing environment variable " + azureClientID) } - if clientSecret := os.Getenv("AZURE_CLIENT_SECRET"); clientSecret != "" { + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") o := &ClientSecretCredentialOptions{ClientOptions: options.ClientOptions} cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) @@ -79,13 +81,17 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme } return &EnvironmentCredential{cred: cred}, nil } - if certPath := os.Getenv("AZURE_CLIENT_CERTIFICATE_PATH"); certPath != "" { + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") certData, err := os.ReadFile(certPath) if err != nil { return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) } - certs, key, err := ParseCertificates(certData, nil) + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) if err != nil { return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) } @@ -99,8 +105,8 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme } return &EnvironmentCredential{cred: cred}, nil } - if username := os.Getenv("AZURE_USERNAME"); username != "" { - if password := os.Getenv("AZURE_PASSWORD"); password != "" { + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") o := &UsernamePasswordCredentialOptions{ClientOptions: options.ClientOptions} cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index c60d13d00..6695f1b70 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -75,6 +75,22 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprint(msg, "Response contained no body") } fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } return msg.String() } diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index e4aaf45b6..9032ae988 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -56,17 +55,7 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp = *options } cp.init() - if !validTenantID(cp.TenantID) { - return nil, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(cp.Cloud) - if err != nil { - return nil, err - } - c, err := public.New(cp.ClientID, - public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)), - public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)), - ) + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index ce6e1e614..c9b72663c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -11,7 +11,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "net/url" "os" @@ -24,6 +23,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) const ( @@ -149,10 +149,18 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag return &c, nil } -// authenticate creates an authentication request for a Managed Identity and returns the resulting Access Token if successful. -// ctx: The current context for controlling the request lifetime. -// clientID: The client (application) ID of the service principal. -// scopes: The scopes required for the token. 
+// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { var cancel context.CancelFunc if c.imdsTimeout > 0 && c.msiType == msiTypeIMDS { @@ -338,7 +346,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour if pos == -1 { return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) } - key, err := ioutil.ReadFile(header[pos+1:]) + key, err := os.ReadFile(header[pos+1:]) if err != nil { return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index f17ada1c3..18078171e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) const credNameManagedIdentity = "ManagedIdentityCredential" @@ -70,8 +71,8 @@ type ManagedIdentityCredentialOptions struct { // user-assigned identity. See Azure Active Directory documentation for more information about managed identities: // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { - id ManagedIDKind - client *managedIdentityClient + client confidentialClient + mic *managedIdentityClient } // NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. @@ -79,11 +80,25 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M if options == nil { options = &ManagedIdentityCredentialOptions{} } - client, err := newManagedIdentityClient(options) + mic, err := newManagedIdentityClient(options) if err != nil { return nil, err } - return &ManagedIdentityCredential{id: options.ID, client: client}, nil + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + if err != nil { + return nil, err + } + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + c, err := confidential.New(clientID, cred) + if err != nil { + return nil, err + } + return &ManagedIdentityCredential{client: c, mic: mic}, nil } // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. 
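The construction above works because MSAL's confidential client accepts an arbitrary token source. A minimal standalone sketch of that hook, matching the MSAL version vendored here (fetchToken is a hypothetical stand-in for the managed identity client's authenticate call):

```go
import (
	"context"
	"time"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// fetchToken is a hypothetical stand-in for managedIdentityClient.authenticate.
func fetchToken(ctx context.Context, scopes []string) (string, time.Time, error) {
	return "opaque-token", time.Now().Add(time.Hour), nil
}

// newCachedClient bridges a custom token source into MSAL, which then owns
// caching, so AcquireTokenSilent can serve repeat requests without a round trip.
func newCachedClient() (confidential.Client, error) {
	cred := confidential.NewCredFromTokenProvider(func(ctx context.Context, p confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
		tk, expires, err := fetchToken(ctx, p.Scopes)
		if err != nil {
			return confidential.TokenProviderResult{}, err
		}
		return confidential.TokenProviderResult{
			AccessToken:      tk,
			ExpiresInSeconds: int(time.Until(expires).Seconds()),
		}, nil
	})
	// As in the diff above, the client ID serves only as a cache key.
	return confidential.New("SYSTEM-ASSIGNED-MANAGED-IDENTITY", cred)
}
```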
@@ -94,12 +109,17 @@ func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.To } // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here scopes := []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - tk, err := c.client.authenticate(ctx, c.id, scopes) + ar, err := c.client.AcquireTokenSilent(ctx, scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, nil + } + ar, err = c.client.AcquireTokenByCredential(ctx, scopes) if err != nil { return azcore.AccessToken{}, err } logGetTokenSuccess(c, opts) - return tk, err + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 8b02e7b47..2ab248c3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -37,20 +36,10 @@ type UsernamePasswordCredential struct { // NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user // will authenticate to. Pass nil for options to accept defaults. func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { - if !validTenantID(tenantID) { - return nil, errors.New(tenantIDValidationErr) - } if options == nil { options = &UsernamePasswordCredentialOptions{} } - authorityHost, err := setAuthorityHost(options.Cloud) - if err != nil { - return nil, err - } - c, err := public.New(clientID, - public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - public.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), - ) + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 0fb125ace..9757589d1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -11,5 +11,5 @@ const ( component = "azidentity" // Version is the semantic version (see http://semver.org) of this module. - version = "v1.1.0" + version = "v1.2.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go index 1fdc53615..245af7d2b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -15,7 +15,7 @@ import ( // Caller returns the file and line number of a frame on the caller's stack. // If the function fails an empty string is returned. // skipFrames - the number of frames to skip when determining the caller.
-// Passing a value of 0 will return the immediate caller of this function. +// Passing a value of 0 will return the immediate caller of this function. func Caller(skipFrames int) string { if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { // the skipFrames + 1 is to skip ourselves diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go index b23f3860c..238ef42ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -49,9 +49,12 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration const backoff = 30 * time.Second // Minimum wait time between eager update attempts - now, acquire, expired, resource := time.Now(), false, false, er.resource + now, acquire, expired := time.Now(), false, false + // acquire exclusive lock er.cond.L.Lock() + resource := er.resource + for { expired = er.expiration.IsZero() || er.expiration.Before(now) if expired { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index db095b3a2..2c2008679 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,26 +1,59 @@ # Release History +## 0.5.1 (2022-10-11) + +### Bugs Fixed + +* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string +* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'. + +### Other Changes + +* Improved docs for client constructors. +* Updated azcore version to 1.1.4 + +## 0.5.0 (2022-09-29) + +### Breaking Changes + +* Complete architectural change for better user experience. Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme) + +### Features Added + +* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977) +* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container). + +### Bugs Fixed + +* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767) +* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937) + ## 0.4.1 (2022-05-12) ### Other Changes + * Updated to latest `azcore` and `internal` modules ## 0.4.0 (2022-04-19) ### Breaking Changes + * Fixed Issue #17150 : Renaming/refactoring high level methods. * Fixed Issue #16972 : Constructors should return clients by reference. -* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags remains the same. +* Renaming the options bags to match the naming convention of the responses. The behaviour of options bags + remains the same. ### Bugs Fixed + * Fixed Issue #17515 : SetTags options bag missing leaseID. * Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`.
* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. * Fixed Issue #17188 : `BlobURLParts` not supporting VersionID -* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods ignoring the options bag. +* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods + ignoring the options bag. * Fixed Issue #16920 : Fixing error handling example. * Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. -* Fixed Issue #16679 : Response parsing issue in List blobs API. +* Fixed Issue #16679 : Response parsing issue in List blobs API. ## 0.3.0 (2022-02-09) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 32a10a005..467fe36cd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,397 +1,274 @@ # Azure Blob Storage SDK for Go -## Introduction +> Server Version: 2020-10-02 -The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud -storage. This is the new beta client module for Azure Blob Storage, which follows -our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the -previous beta [azblob package](https://github.com/azure/azure-storage-blob-go). +Azure Blob storage is Microsoft's object storage solution for the cloud. Blob +storage is optimized for storing massive amounts of unstructured data. +Unstructured data is data that does not adhere to a particular data model or +definition, such as text or binary data. -## Getting Started +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] -The Azure Blob SDK can access an Azure Storage account. - -### Prerequisites - -* Go versions 1.18 or higher -* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use - the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group` - and `mystorageaccount` with your own unique names): - (Optional) if you want a new resource group to hold the Storage Account: - ``` - az group create --name my-resource-group --location westus2 - ``` - Create the storage account: - ``` - az storage account create --resource-group my-resource-group --name mystorageaccount - ``` - - The storage account name can be queried with: - ``` - az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" - ``` - You can set this as an environment variable with: - ```bash - # PowerShell - $ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" - # bash - export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" - ``` - - Query your storage account keys: - ``` - az storage account keys list --resource-group my-resource-group -n mystorageaccount - ``` - - Output: - ```json - [ - { - "creationTime": "2022-02-07T17:18:44.088870+00:00", - "keyName": "key1", - "permissions": "FULL", - "value": "..." - }, - { - "creationTime": "2022-02-07T17:18:44.088870+00:00", - "keyName": "key2", - "permissions": "FULL", - "value": "..." 
- } - ] - ``` - - ```bash - # PowerShell - $ENV:AZURE_STORAGE_ACCOUNT_KEY="" - # Bash - export AZURE_STORAGE_ACCOUNT_KEY="" - ``` - > You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account. - -#### Create account - -* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account] - , [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account]. +## Getting started ### Install the package -* Install the Azure Blob Storage client module for Go with `go get`: +Install the Azure Blob Storage SDK for Go with [go get][goget]: -```bash +```Powershell go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob ``` -> Optional: If you are going to use AAD authentication, install the `azidentity` package: - -```bash +If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. +```Powershell go get github.com/Azure/azure-sdk-for-go/sdk/azidentity ``` -#### Create the client - -`azblob` allows you to interact with three types of resources :- - -* [Azure storage accounts][azure_storage_account]. -* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts. -* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs) - within those containers. +### Prerequisites -Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will -need the account's blob service endpoint URL and a credential that allows you to access the account. The `endpoint` can -be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" -section or by running the following Azure CLI command: +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). -```bash -# Get the blob service URL for the account -az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" -``` +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. -Once you have the account URL, it can be used to create the service client: +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: -```golang -cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey") -handle(err) -serviceClient, err := azblob.NewServiceClientWithSharedKey("https://.blob.core.windows.net/", cred, nil) -handle(err) +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS ``` -For more information about blob service URL's and how to configure custom domain names for Azure Storage check out -the [official documentation][azure_portal_account_url] - -#### Types of credentials +### Authenticate the client -The azblob clients support authentication via Shared Key Credential, Connection String, Shared Access Signature, or any -of the `azidentity` types that implement the `azcore.TokenCredential` interface. +In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. 
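Besides Azure Active Directory (shown next), the module also offers constructors for shared key, connection string, and SAS or anonymous access. A brief sketch with placeholder values:

```go
// shared key (placeholder account name and key)
skc, err := azblob.NewSharedKeyCredential("MYSTORAGEACCOUNT", "<account key>")
// TODO: handle err
client, err := azblob.NewClientWithSharedKeyCredential("https://MYSTORAGEACCOUNT.blob.core.windows.net/", skc, nil)
// TODO: handle err

// connection string (placeholder)
client, err = azblob.NewClientFromConnectionString("<connection string>", nil)
// TODO: handle err

// anonymous access, or a URL that already carries a SAS token
client, err = azblob.NewClientWithNoCredential("https://MYSTORAGEACCOUNT.blob.core.windows.net/?<SAS>", nil)
// TODO: handle err
_ = client
```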
The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. -##### 1. Creating the client from a shared key - -To use an account [shared key][azure_shared_key] (aka account key or access key), provide the key as a string. This can -be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by -running the following Azure CLI command: +```go +// create a credential for authenticating with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle err -```bash -az storage account keys list -g my-resource-group -n mystorageaccount +// create an azblob.Client for the specified storage account that uses the above credential +client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil) +// TODO: handle err ``` -Use Shared Key authentication as the credential parameter to authenticate the client: +Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). -```golang -credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") -handle(err) -serviceClient, err := azblob.NewServiceClientWithSharedKey("https://.blob.core.windows.net/", credential, nil) -handle(err) -``` +## Key concepts -##### 2. Creating the client from a connection string +Blob storage is designed for: -You can use connection string, instead of providing the account URL and credential separately, for authentication as -well. To do this, pass the connection string to the client's `NewServiceClientFromConnectionString` method. The -connection string can be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access -Keys" section or with the following Azure CLI command: +- Serving images or documents directly to a browser. +- Storing files for distributed access. +- Streaming video and audio. +- Writing to log files. +- Storing data for backup and restore, disaster recovery, and archiving. +- Storing data for analysis by an on-premises or Azure-hosted service. -```bash -az storage account show-connection-string -g my-resource-group -n mystorageaccount -``` +Blob storage offers three types of resources: -```golang -connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" -serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) -``` +- The _storage account_ +- One or more _containers_ in a storage account +- One or more _blobs_ in a container -##### 3. Creating the client from a SAS token - -To use a [shared access signature (SAS) token][azure_sas_token], provide the token as a string. You can generate a SAS -token from the Azure Portal -under [Shared access signature](https://docs.microsoft.com/rest/api/storageservices/create-service-sas) or use -the `ServiceClient.GetSASToken` or `ContainerClient.GetSASToken()` methods. - -```golang -credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") -handle(err) -serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil) -handle(err) -// Provide the convenience function with relevant info (services, resource types, permissions, and duration) -// The SAS token will be valid from this moment onwards.
-accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true}, -AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour)) -handle(err) -sasURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS) - -// The sasURL can be used to authenticate a client without need for a credential -serviceClient, err = NewServiceClientWithNoCredential(sasURL, nil) -handle(err) -``` +Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. +The storage account is specified when the `azblob.Client` is constructed. +Use the appropriate client constructor function for the authentication mechanism you wish to use. -### Clients +Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go) -Three different clients are provided to interact with the various components of the Blob Service: +### Goroutine safety +We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. -1. **`ServiceClient`** - * Get and set account settings. - * Query, create, and delete containers within the account. +### About blob metadata +Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. -2. **`ContainerClient`** - * Get and set container access settings, properties, and metadata. - * Create, delete, and query blobs within the container. - * `ContainerLeaseClient` to support container lease management. +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + -3. **`BlobClient`** - * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` - * Get and set blob properties. - * Perform CRUD operations on a given blob. - * `BlobLeaseClient` to support blob lease management. +## Examples -### Example +### Uploading a blob ```go -// Use your storage account's name and key to create a credential object, used to access your account. -// You can obtain these details from the Azure Portal. -accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") -if !ok { - handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found")) -} - -accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") -if !ok { - handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found")) -} -cred, err := NewSharedKeyCredential(accountName, accountKey) -handle(err) - -// Open up a service client. 
-// You'll need to specify a service URL, which for blob endpoints usually makes up the syntax http(s)://.blob.core.windows.net/ -service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) -handle(err) - -// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout. -ctx := context.Background() // This example has no expiry. - -// This example showcases several common operations to help you get started, such as: - -// ===== 1. Creating a container ===== - -// First, branch off of the service client and create a container client. -container := service.NewContainerClient("mycontainer") - -// Then, fire off a create operation on the container client. -// Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc. -// Specifying nil omits all options. -_, err = container.Create(ctx, nil) -handle(err) - -// ===== 2. Uploading/downloading a block blob ===== -// We'll specify our data up-front, rather than reading a file for simplicity's sake. -data := "Hello world!" - -// Branch off of the container into a block blob client -blockBlob := container.NewBlockBlobClient("HelloWorld.txt") - -// Upload data to the block blob -_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil) -handle(err) - -// Download the blob's contents and ensure that the download worked properly -get, err := blockBlob.Download(ctx, nil) -handle(err) - -// Open a buffer, reader, and then download! -downloadedData := &bytes.Buffer{} -// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. -reader := get.Body(RetryReaderOptions{}) -_, err = downloadedData.ReadFrom(reader) -handle(err) -err = reader.Close() -handle(err) -if data != downloadedData.String() { - handle(errors.New("downloaded data doesn't match uploaded data")) -} +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" + blobName = "sample-blob" + sampleFile = "path/to/sample/file" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// open the file for reading +file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0) +// TODO: handle error +defer file.Close() + +// upload the file to the specified container with the specified blob name +_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) +// TODO: handle error +``` -// ===== 3. list blobs ===== -// The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel. -// You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel. -// The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout. 
-pager := container.ListBlobsFlat(nil) +### Downloading a blob -for pager.NextPage(ctx) { - resp := pager.PageResponse() +```go +// this example accesses a public blob via anonymous access, so no credentials are required +client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil) +// TODO: handle error + +// create or open a local file where we can download the blob +file, err := os.Create("cloud.jpg") +// TODO: handle error +defer file.Close() + +// download the blob +_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) +// TODO: handle error +``` - for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems { - fmt.Println(*v.Name) - } -} +### Enumerating blobs -if err = pager.Err(); err != nil { - handle(err) +```go +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// blob listings are returned across multiple pages +pager := client.NewListBlobsFlatPager(containerName, nil) + +// continue fetching pages until no more remain +for pager.More() { + // advance to the next page + page, err := pager.NextPage(context.TODO()) + // TODO: handle error + + // print the blob names for this page + for _, blob := range page.Segment.BlobItems { + fmt.Println(*blob.Name) + } } - -// Delete the blob we created earlier. -_, err = blockBlob.Delete(ctx, nil) -handle(err) - -// Delete the container we created earlier. -_, err = container.Delete(ctx, nil) -handle(err) ``` ## Troubleshooting -### Error Handling - -All I/O operations will return an `error` that can be investigated to discover more information about the error. In -addition, you can investigate the raw response of any response object: +All Blob service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [bloberror][blob_error] package provides the possible Storage error codes +along with various helper facilities for error handling. -```golang -var storageErr *azblob.StorageError -resp, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil) -if err != nil && errors.As(err, &storageErr) { - // do something with storageErr.Response() +```go +const ( + connectionString = "" + containerName = "sample-container" +) + +// create a client with the provided connection string +client, err := azblob.NewClientFromConnectionString(connectionString, nil) +// TODO: handle error + +// try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion +_, err = client.DeleteContainer(context.TODO(), containerName, nil) + +if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) { + // ignore any errors if the container is being deleted or already has been deleted +} else if err != nil { + // TODO: some other error } ``` -### Logging - -This module uses the classification based logging implementation in azcore. To turn on logging -set `AZURE_SDK_GO_LOGGING` to `all`. +## Next steps -If you only want to include logs for `azblob`, you must create your own logger and set the log classification -as `LogCredential`. 
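Beyond inspecting error codes, request and response diagnostics can still be enabled through the classification-based logging that now lives in `azcore/log`; a minimal sketch (the event selection is illustrative):

```go
import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func init() {
	// print all SDK log output to the console
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Println(msg)
	})
	// limit output to HTTP request and response events
	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
}
```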
+Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. ### Specialized clients -```golang -import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" -// Set log to output to the console -azlog.SetListener(func (cls azlog.Classification, msg string) { - fmt.Println(msg) // printing log out to the console -}) +The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages. +Use these clients when you need to interact with a specific kind of blob. +Learn more about the various types of blobs from the following links. -// Includes only requests and responses in credential logs -azlog.SetClassifications(azlog.Request, azlog.Response) -``` +- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs) +- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs) +- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs) -> CAUTION: logs from credentials contain sensitive information. -> These logs must be protected to avoid compromising account security. -> +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. -## License +The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. -This project is licensed under MIT. +The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more. -## Provide Feedback +The [service][service] package contains APIs specific to the blob service. This includes APIs for manipulating containers, retrieving account information, and more. -If you encounter bugs or have suggestions, please -[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Azure.AzBlob` label. +The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. +See the package's documentation for more information. ## Contributing See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, testing, and contributing to this library. This project welcomes contributions and suggestions.
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. For +details, visit [cla.microsoft.com][cla]. -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. +For more information see the [Code of Conduct FAQ][coc_faq] +or contact [opencode@microsoft.com][coc_contact] with any +additional questions or comments. +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png) - -[azure_subscription]:https://azure.microsoft.com/free/ - -[azure_storage_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal - -[azure_portal_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal - -[azure_powershell_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-powershell - -[azure_cli_create_account]: https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-cli - -[azure_cli_account_url]:https://docs.microsoft.com/cli/azure/storage/account?view=azure-cli-latest#az-storage-account-show - -[azure_powershell_account_url]:https://docs.microsoft.com/powershell/module/az.storage/get-azstorageaccount?view=azps-4.6.1 - -[azure_portal_account_url]:https://docs.microsoft.com/azure/storage/common/storage-account-overview#storage-account-endpoints - -[azure_sas_token]:https://docs.microsoft.com/azure/storage/common/storage-sas-overview - -[azure_shared_key]:https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key - -[azure_core_ref_docs]:https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore - -[azure_core_readme]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/README.md - -[blobs_error_codes]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes - -[msft_oss_coc]:https://opensource.microsoft.com/codeofconduct/ - -[msft_oss_coc_faq]:https://opensource.microsoft.com/codeofconduct/faq/ - -[contact_msft_oss]:mailto:opencode@microsoft.com - -[blobs_rest]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[godevdl]: https://go.dev/dl/ +[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them +[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: 
https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_cli]: https://docs.microsoft.com/cli/azure +[azure_sub]: https://azure.microsoft.com/free/ +[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError +[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go +[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go +[blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go +[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go +[block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go +[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go +[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease +[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go +[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas +[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go +[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go new file mode 100644 index 000000000..a6c204ac6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -0,0 +1,276 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package appendblob + +import ( + "context" + "io" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a client to an Azure Storage append blob; +type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +// BlobClient returns the embedded blob client for this AppendBlob client. 
+func (ab *Client) BlobClient() *blob.Client { + innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) + return (*blob.Client)(innerBlob) +} + +func (ab *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) +} + +func (ab *Client) generated() *generated.AppendBlobClient { + _, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) + return appendBlob +} + +// URL returns the URL endpoint used by the Client object. +func (ab *Client) URL() string { + return ab.generated().Endpoint() +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(ab.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (ab *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(ab.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil +} + +// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + resp, err := ab.generated().Create(ctx, 0, opts, httpHeaders, leaseAccessConditions, cpkInfo, + cpkScopeInfo, modifiedAccessConditions) + return resp, err +} + +// AppendBlock appends a stream of data as a new block at the end of the existing append blob. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. +func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return AppendBlockResponse{}, err + } + + appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format() + + resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + + return resp, err +} + +// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
+func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) { + appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + + // content length must always be 0 when appending from a URL; the service returns a 400 if it isn't + resp, err := ab.generated().AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, + leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + return resp, err +} + +// Seal - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only. +// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal +func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) { + leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format() + resp, err := ab.generated().Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) + return resp, err +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return ab.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return ab.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return ab.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return ab.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
+func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return ab.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return ab.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return ab.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return ab.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return ab.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return ab.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return ab.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return ab.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer in parallel. +func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return ab.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file will be truncated if its size doesn't match the blob's size.
+func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
+	return ab.BlobClient().DownloadFile(ctx, file, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go
new file mode 100644
index 000000000..69faf6ad6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go
@@ -0,0 +1,166 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package appendblob
+
+import (
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+)
+
+// Type Declarations ---------------------------------------------------------------------
+
+// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method.
+type AppendPositionAccessConditions = generated.AppendPositionAccessConditions
+
+// Request Model Declaration -------------------------------------------------------------------------------------------
+
+// CreateOptions provides a set of configurations for the Create Append Blob operation.
+type CreateOptions struct {
+	// Specifies the date and time when the blob's immutability policy expires.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
+
+	// Specifies whether a legal hold should be set on the blob.
+	LegalHold *bool
+
+	AccessConditions *blob.AccessConditions
+
+	HTTPHeaders *blob.HTTPHeaders
+
+	CpkInfo *blob.CpkInfo
+
+	CpkScopeInfo *blob.CpkScopeInfo
+
+	// Optional. Used to set blob tags in various blob operations.
+	Tags map[string]string
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
+ Metadata map[string]string +} + +func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := generated.AppendBlobClientCreateOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method. +type AppendBlockOptions struct { + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + AccessConditions *blob.AccessConditions +} + +func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &generated.AppendBlobClientAppendBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. +type AppendBlockFromURLOptions struct { + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + AccessConditions *blob.AccessConditions + + // Range specifies a range of bytes. The default value is all bytes. 
+	Range blob.HTTPRange
+}
+
+func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CpkInfo,
+	*generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions,
+	*generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil, nil
+	}
+
+	options := &generated.AppendBlobClientAppendBlockFromURLOptions{
+		SourceRange:             exported.FormatHTTPRange(o.Range),
+		SourceContentMD5:        o.SourceContentMD5,
+		SourceContentcrc64:      o.SourceContentCRC64,
+		TransactionalContentMD5: o.TransactionalContentMD5,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SealOptions provides a set of configurations for the Seal operation.
+type SealOptions struct {
+	AccessConditions               *blob.AccessConditions
+	AppendPositionAccessConditions *AppendPositionAccessConditions
+}
+
+func (o *SealOptions) format() (*generated.LeaseAccessConditions,
+	*generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go
new file mode 100644
index 000000000..e9ab4a3cc
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go
@@ -0,0 +1,23 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package appendblob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// CreateResponse contains the response from method Client.Create.
+type CreateResponse = generated.AppendBlobClientCreateResponse
+
+// AppendBlockResponse contains the response from method Client.AppendBlock.
+type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse
+
+// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL.
+type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse
+
+// SealResponse contains the response from method Client.Seal.
+type SealResponse = generated.AppendBlobClientSealResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md deleted file mode 100644 index 0a391904a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md +++ /dev/null @@ -1,171 +0,0 @@ -# Code Generation - Azure Blob SDK for Golang - - - -```bash -cd swagger -autorest autorest.md -gofmt -w generated/* -``` - -### Settings - -```yaml -go: true -clear-output-folder: false -version: "^3.0.0" -license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" -module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" -credential-scope: "https://storage.azure.com/.default" -output-folder: internal/ -file-prefix: "zz_generated_" -openapi-type: "data-plane" -verbose: true -security: AzureKey -module-version: "0.3.0" -modelerfour: - group-parameters: false - seal-single-value-enum-by-default: true - lenient-model-deduplication: true -export-clients: false -use: "@autorest/go@4.0.0-preview.36" -``` - -### Fix BlobMetadata. - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.BlobMetadata["properties"]; - -``` - -### Don't include container name or blob in path - we have direct URIs. - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"] - transform: > - for (const property in $) - { - if (property.includes('/{containerName}/{blob}')) - { - $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } - else if (property.includes('/{containerName}')) - { - $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); - } - } -``` - -### Remove DataLake stuff. 
- -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"] - transform: > - for (const property in $) - { - if (property.includes('filesystem')) - { - delete $[property]; - } - } -``` - -### Remove DataLakeStorageError - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.DataLakeStorageError; -``` - -### Fix 304s - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"]["/{containerName}/{blob}"] - transform: > - $.get.responses["304"] = { - "description": "The condition specified using HTTP conditional header(s) is not met.", - "x-az-response-name": "ConditionNotMetError", - "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } - }; -``` - -### Fix GeoReplication - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.GeoReplication.properties.Status["x-ms-enum"]; - $.GeoReplication.properties.Status["x-ms-enum"] = { - "name": "BlobGeoReplicationStatus", - "modelAsString": false - }; -``` - -### Fix RehydratePriority - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.RehydratePriority["x-ms-enum"]; - $.RehydratePriority["x-ms-enum"] = { - "name": "RehydratePriority", - "modelAsString": false - }; -``` - -### Fix BlobDeleteType - -``` yaml -directive: -- from: swagger-document - where: $.parameters - transform: > - delete $.BlobDeleteType.enum; - $.BlobDeleteType.enum = [ - "None", - "Permanent" - ]; -``` - -### Fix EncryptionAlgorithm - -``` yaml -directive: -- from: swagger-document - where: $.parameters - transform: > - delete $.EncryptionAlgorithm.enum; - $.EncryptionAlgorithm.enum = [ - "None", - "AES256" - ]; -``` - -### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; - delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; -``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go new file mode 100644 index 000000000..1b7cf6d86 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -0,0 +1,423 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "errors" + "io" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. 
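+//
+// A minimal construction sketch, assuming Azure AD authentication via the
+// azidentity module (the URL below uses placeholder account/container/blob names):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	client, err := blob.NewClient("https://<storage-account>.blob.core.windows.net/container/blob.txt", cred, nil)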
+type Client base.Client[generated.BlobClient]
+
+// NewClient creates an instance of Client with the specified values.
+// - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a blob or with a shared access signature (SAS) token.
+// - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt
+// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - containerName - the name of the container within the storage account
+// - blobName - the name of the blob within the container
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (b *Client) generated() *generated.BlobClient {
+	return base.InnerClient((*base.Client[generated.BlobClient])(b))
+}
+
+func (b *Client) sharedKey() *SharedKeyCredential {
+	return base.SharedKey((*base.Client[generated.BlobClient])(b))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (b *Client) URL() string {
+	return b.generated().Endpoint()
+}
+
+// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot, returning a URL to the base blob.
+func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := ParseURL(b.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil
+}
+
+// WithVersionID creates a new Client object identical to the source but with the specified version ID.
+// Pass "" to remove the version ID, returning a URL to the base blob.
+func (b *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := ParseURL(b.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil
+}
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) {
+	deleteOptions, leaseInfo, accessConditions := o.format()
+	resp, err := b.generated().Delete(ctx, deleteOptions, leaseInfo, accessConditions)
+	return resp, err
+}
+
+// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
+func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) {
+	undeleteOptions := o.format()
+	resp, err := b.generated().Undelete(ctx, undeleteOptions)
+	return resp, err
+}
+
+// SetTier sets the tier on a blob. The operation is allowed on a page
+// blob in a premium storage account and on a block blob in a blob storage account (locally
+// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
+// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
+// does not update the blob's ETag.
+// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
+func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) {
+	opts, leaseAccessConditions, modifiedAccessConditions := o.format()
+	resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions)
+	return resp, err
+}
+
+// GetProperties returns the blob's properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
+func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) {
+	opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions := options.format()
+	resp, err := b.generated().GetProperties(ctx, opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+	return resp, err
+}
+
+// SetHTTPHeaders changes a blob's HTTP headers.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
+func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
+	opts, leaseAccessConditions, modifiedAccessConditions := o.format()
+	resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
+	return resp, err
+}
+
+// SetMetadata changes a blob's metadata.
+// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
+func (b *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *SetMetadataOptions) (SetMetadataResponse, error) {
+	basics := generated.BlobClientSetMetadataOptions{Metadata: metadata}
+	leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format()
+	resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
+	return resp, err
+}
+
+// CreateSnapshot creates a read-only snapshot of a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
+func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) {
+	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
+	// because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this
+	// performance hit.
+	opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format()
+	resp, err := b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions)
+
+	return resp, err
+}
+
+// StartCopyFromURL copies the data at the source URL to a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
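+//
+// StartCopyFromURL only begins the copy; a sketch of polling until the service
+// reports a terminal state (client is an assumed *Client and srcURL a readable source URL):
+//
+//	resp, err := client.StartCopyFromURL(context.TODO(), srcURL, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	copyStatus := *resp.CopyStatus
+//	for copyStatus == blob.CopyStatusTypePending {
+//		time.Sleep(time.Second)
+//		props, err := client.GetProperties(context.TODO(), nil)
+//		if err != nil {
+//			break
+//		}
+//		copyStatus = *props.CopyStatus
+//	}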
+func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) {
+	opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions := options.format()
+	resp, err := b.generated().StartCopyFromURL(ctx, copySource, opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
+	return resp, err
+}
+
+// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
+func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) {
+	opts, leaseAccessConditions := options.format()
+	resp, err := b.generated().AbortCopyFromURL(ctx, copyID, opts, leaseAccessConditions)
+	return resp, err
+}
+
+// SetTags sets tags on a blob or a specific blob version, but not on a snapshot.
+// Each call to this operation replaces all existing tags attached to the blob.
+// To remove all tags from the blob, call this operation with no tags set.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
+func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) {
+	serializedTags := shared.SerializeBlobTags(tags)
+	blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format()
+	resp, err := b.generated().SetTags(ctx, *serializedTags, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
+	return resp, err
+}
+
+// GetTags gets the tags on a blob, a specific blob version, or a snapshot.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
+func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) {
+	blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format()
+	resp, err := b.generated().GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
+	return resp, err
+}
+
+// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
+func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) {
+	copyOptions, smac, mac, lac := options.format()
+	resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac)
+	return resp, err
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the blob this client points to.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, expiry time.Time) (string, error) {
+	if b.sharedKey() == nil {
+		return "", errors.New("credential is not a SharedKeyCredential.
SAS can only be signed with a SharedKeyCredential") + } + + urlParts, err := ParseURL(b.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + + qps, err := sas.BlobSignatureValues{ + ContainerName: urlParts.ContainerName, + BlobName: urlParts.BlobName, + SnapshotTime: t, + Version: sas.Version, + + Permissions: permissions.String(), + + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(b.sharedKey()) + + if err != nil { + return "", err + } + + endpoint := b.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// download downloads an Azure blob to a WriterAt in parallel. +func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + count := o.Range.Count + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return 0, err + } + count = *dr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + // Prepare and do parallel download. + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + Concurrency: o.Concurrency, + Operation: func(chunkStart int64, count int64, ctx context.Context) error { + + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: count, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return err + } + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body) + if err != nil { + return err + } + err = body.Close() + return err + }, + }) + if err != nil { + return 0, err + } + return count, nil +} + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
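+//
+// A minimal read sketch; client is an assumed, already-constructed *Client, and
+// the retry reader re-establishes dropped connections while reading:
+//
+//	resp, err := client.DownloadStream(context.TODO(), nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	rr := resp.NewRetryReader(context.TODO(), nil)
+//	defer rr.Close()
+//	data, err := io.ReadAll(rr)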
+func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
+	if o == nil {
+		o = &DownloadStreamOptions{}
+	}
+	downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format()
+
+	dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+	if err != nil {
+		return DownloadStreamResponse{}, err
+	}
+
+	return DownloadStreamResponse{
+		client:                     b,
+		BlobClientDownloadResponse: dr,
+		getInfo:                    httpGetterInfo{Range: o.Range, ETag: dr.ETag},
+		ObjectReplicationRules:     deserializeORSPolicies(dr.ObjectReplicationRules),
+		cpkInfo:                    o.CpkInfo,
+		cpkScope:                   o.CpkScopeInfo,
+	}, err
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) {
+	if o == nil {
+		o = &DownloadBufferOptions{}
+	}
+	return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o))
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file is resized to match the blob's size if they differ.
+func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) {
+	if o == nil {
+		o = &DownloadFileOptions{}
+	}
+	do := (*downloadOptions)(o)
+
+	// 1. Calculate the size of the destination file
+	var size int64
+
+	count := do.Range.Count
+	if count == CountToEnd {
+		// Try to get Azure blob's size
+		getBlobPropertiesOptions := do.getBlobPropertiesOptions()
+		props, err := b.GetProperties(ctx, getBlobPropertiesOptions)
+		if err != nil {
+			return 0, err
+		}
+		size = *props.ContentLength - do.Range.Offset
+	} else {
+		size = count
+	}
+
+	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
+	stat, err := file.Stat()
+	if err != nil {
+		return 0, err
+	}
+	if stat.Size() != size {
+		if err = file.Truncate(size); err != nil {
+			return 0, err
+		}
+	}
+
+	if size > 0 {
+		return b.download(ctx, file, *do)
+	} else { // if the blob's size is 0, there is no need to download it
+		return 0, nil
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go
new file mode 100644
index 000000000..d0e5d7d82
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go
@@ -0,0 +1,241 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package blob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+const (
+	CountToEnd = 0
+
+	SnapshotTimeFormat = exported.SnapshotTimeFormat
+
+	// DefaultDownloadBlockSize is the default block size
+	DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+)
+
+// BlobType defines values for BlobType
+type BlobType = generated.BlobType
+
+const (
+	BlobTypeBlockBlob  BlobType = generated.BlobTypeBlockBlob
+	BlobTypePageBlob   BlobType = generated.BlobTypePageBlob
+	BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob
+)
+
+// PossibleBlobTypeValues returns the possible values for the BlobType const type.
+func PossibleBlobTypeValues() []BlobType {
+	return generated.PossibleBlobTypeValues()
+}
+
+// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType
+type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType
+
+const (
+	DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
+	DeleteSnapshotsOptionTypeOnly    DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly
+)
+
+// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
+	return generated.PossibleDeleteSnapshotsOptionTypeValues()
+}
+
+// AccessTier defines values for Blob Access Tier
+type AccessTier = generated.AccessTier
+
+const (
+	AccessTierArchive AccessTier = generated.AccessTierArchive
+	AccessTierCool    AccessTier = generated.AccessTierCool
+	AccessTierHot     AccessTier = generated.AccessTierHot
+	AccessTierP10     AccessTier = generated.AccessTierP10
+	AccessTierP15     AccessTier = generated.AccessTierP15
+	AccessTierP20     AccessTier = generated.AccessTierP20
+	AccessTierP30     AccessTier = generated.AccessTierP30
+	AccessTierP4      AccessTier = generated.AccessTierP4
+	AccessTierP40     AccessTier = generated.AccessTierP40
+	AccessTierP50     AccessTier = generated.AccessTierP50
+	AccessTierP6      AccessTier = generated.AccessTierP6
+	AccessTierP60     AccessTier = generated.AccessTierP60
+	AccessTierP70     AccessTier = generated.AccessTierP70
+	AccessTierP80     AccessTier = generated.AccessTierP80
+	AccessTierPremium AccessTier = generated.AccessTierPremium
+)
+
+// PossibleAccessTierValues returns the possible values for the AccessTier const type.
+func PossibleAccessTierValues() []AccessTier {
+	return generated.PossibleAccessTierValues()
+}
+
+// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
+// Valid values are High and Standard.
+type RehydratePriority = generated.RehydratePriority
+
+const (
+	RehydratePriorityHigh     RehydratePriority = generated.RehydratePriorityHigh
+	RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard
+)
+
+// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type.
+func PossibleRehydratePriorityValues() []RehydratePriority {
+	return generated.PossibleRehydratePriorityValues()
+}
+
+// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode
+type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode
+
+const (
+	ImmutabilityPolicyModeMutable  ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable
+	ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked
+	ImmutabilityPolicyModeLocked   ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked
+)
+
+// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
+func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
+	return generated.PossibleImmutabilityPolicyModeValues()
+}
+
+// ImmutabilityPolicySetting defines values for ImmutabilityPolicySetting
+type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return generated.PossibleImmutabilityPolicySettingValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType +type EncryptionAlgorithmType = generated.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256 +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return generated.PossibleEncryptionAlgorithmTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// DeleteType defines values for DeleteType +type DeleteType = generated.DeleteType + +const ( + DeleteTypeNone DeleteType = generated.DeleteTypeNone + DeleteTypePermanent DeleteType = generated.DeleteTypePermanent +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return generated.PossibleDeleteTypeValues() +} + +// ExpiryOptions defines values for ExpiryOptions +type ExpiryOptions = generated.ExpiryOptions + +const ( + ExpiryOptionsAbsolute ExpiryOptions = generated.ExpiryOptionsAbsolute + ExpiryOptionsNeverExpire ExpiryOptions = generated.ExpiryOptionsNeverExpire + ExpiryOptionsRelativeToCreation ExpiryOptions = generated.ExpiryOptionsRelativeToCreation + ExpiryOptionsRelativeToNow ExpiryOptions = generated.ExpiryOptionsRelativeToNow +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return generated.PossibleExpiryOptionsValues() +} + +// QueryFormatType - The quick query format type. 
+type QueryFormatType = generated.QueryFormatType + +const ( + QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited + QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON + QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow + QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return generated.PossibleQueryFormatTypeValues() +} + +// LeaseDurationType defines values for LeaseDurationType +type LeaseDurationType = generated.LeaseDurationType + +const ( + LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite + LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// LeaseStateType defines values for LeaseStateType +type LeaseStateType = generated.LeaseStateType + +const ( + LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable + LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased + LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired + LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking + LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return generated.PossibleLeaseStateTypeValues() +} + +// LeaseStatusType defines values for LeaseStatusType +type LeaseStatusType = generated.LeaseStatusType + +const ( + LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked + LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return generated.PossibleLeaseStatusTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go new file mode 100644 index 000000000..86af620c3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -0,0 +1,492 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. 
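+//
+// A minimal pairing with the shared-key client constructor; account name, key,
+// and URL are placeholders:
+//
+//	cred, err := blob.NewSharedKeyCredential("<account-name>", "<account-key>")
+//	if err != nil {
+//		// handle error
+//	}
+//	client, err := blob.NewClientWithSharedKeyCredential("https://<account-name>.blob.core.windows.net/container/blob.txt", cred, nil)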
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+	return exported.NewSharedKeyCredential(accountName, accountKey)
+}
+
+// Type Declarations ---------------------------------------------------------------------
+
+// AccessConditions identifies blob-specific access conditions which you optionally set.
+type AccessConditions = exported.BlobAccessConditions
+
+// LeaseAccessConditions contains optional parameters to access leased entity.
+type LeaseAccessConditions = exported.LeaseAccessConditions
+
+// ModifiedAccessConditions contains a group of parameters for specifying access conditions.
+type ModifiedAccessConditions = exported.ModifiedAccessConditions
+
+// CpkInfo contains a group of parameters for client provided encryption key.
+type CpkInfo = generated.CpkInfo
+
+// CpkScopeInfo contains a group of parameters for client provided encryption scope.
+type CpkScopeInfo = generated.CpkScopeInfo
+
+// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
+type HTTPHeaders = generated.BlobHTTPHeaders
+
+// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method.
+type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions
+
+// Tags represents a map of blob index tags.
+type Tags = generated.BlobTag
+
+// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
+// with an offset and a zero-value count indicates the range from the offset to the resource's end.
+type HTTPRange = exported.HTTPRange
+
+// Request Model Declaration -------------------------------------------------------------------------------------------
+
+// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method.
+type DownloadStreamOptions struct {
+	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
+	// range is less than or equal to 4 MB in size.
+	RangeGetContentMD5 *bool
+
+	// Range specifies a range of bytes. The default value is all bytes.
+	Range HTTPRange
+
+	AccessConditions *AccessConditions
+	CpkInfo          *CpkInfo
+	CpkScopeInfo     *CpkScopeInfo
+}
+
+func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	basics := generated.BlobClientDownloadOptions{
+		RangeGetContentMD5: o.RangeGetContentMD5,
+		Range:              exported.FormatHTTPRange(o.Range),
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions.
+type downloadOptions struct {
+	// Range specifies a range of bytes. The default value is all bytes.
+	Range HTTPRange
+
+	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress func(bytesTransferred int64)
+
+	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	AccessConditions *AccessConditions
+
+	// CpkInfo and CpkScopeInfo specify the client-provided key used to encrypt/decrypt data.
+	CpkInfo      *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+
+	// Concurrency indicates the maximum number of blocks to download in parallel (0=default)
+	Concurrency uint16
+
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions {
+	if o == nil {
+		return nil
+	}
+	return &GetPropertiesOptions{
+		AccessConditions: o.AccessConditions,
+		CpkInfo:          o.CpkInfo,
+	}
+}
+
+func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions {
+	if o == nil {
+		return nil
+	}
+	return &DownloadStreamOptions{
+		AccessConditions:   o.AccessConditions,
+		CpkInfo:            o.CpkInfo,
+		CpkScopeInfo:       o.CpkScopeInfo,
+		Range:              rnge,
+		RangeGetContentMD5: rangeGetContentMD5,
+	}
+}
+
+// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method.
+type DownloadBufferOptions struct {
+	// Range specifies a range of bytes. The default value is all bytes.
+	Range HTTPRange
+
+	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress func(bytesTransferred int64)
+
+	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	AccessConditions *AccessConditions
+
+	// CpkInfo contains a group of parameters for client provided encryption key.
+	CpkInfo *CpkInfo
+
+	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
+	CpkScopeInfo *CpkScopeInfo
+
+	// Concurrency indicates the maximum number of blocks to download in parallel (0=default)
+	Concurrency uint16
+
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+// DownloadFileOptions contains the optional parameters for the DownloadFile method.
+type DownloadFileOptions struct {
+	// Range specifies a range of bytes. The default value is all bytes.
+	Range HTTPRange
+
+	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress func(bytesTransferred int64)
+
+	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	AccessConditions *AccessConditions
+
+	// CpkInfo and CpkScopeInfo specify the client-provided key used to encrypt/decrypt data.
+	CpkInfo      *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+
+	// Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5.
+	Concurrency uint16
+
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DeleteOptions contains the optional parameters for the Client.Delete method.
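+//
+// A sketch of deleting a blob together with its snapshots; client is an
+// assumed, already-constructed *Client:
+//
+//	include := blob.DeleteSnapshotsOptionTypeInclude
+//	_, err := client.Delete(context.TODO(), &blob.DeleteOptions{DeleteSnapshots: &include})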
+type DeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself + DeleteSnapshots *DeleteSnapshotsOptionType + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + } + + if o.AccessConditions == nil { + return &basics, nil, nil + } + + return &basics, o.AccessConditions.LeaseAccessConditions, o.AccessConditions.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UndeleteOptions contains the optional parameters for the Client.Undelete method. +type UndeleteOptions struct { + // placeholder for future options +} + +func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTierOptions contains the optional parameters for the Client.SetTier method. +type SetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + AccessConditions *AccessConditions +} + +func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method +type GetPropertiesOptions struct { + AccessConditions *AccessConditions + CpkInfo *CpkInfo +} + +func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, + *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. 
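+//
+// A sketch of overriding the served Content-Type; client is an assumed,
+// already-constructed *Client, and BlobContentType follows the generated
+// BlobHTTPHeaders field naming:
+//
+//	contentType := "text/plain"
+//	_, err := client.SetHTTPHeaders(context.TODO(), blob.HTTPHeaders{BlobContentType: &contentType}, nil)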
+type SetHTTPHeadersOptions struct {
+	AccessConditions *AccessConditions
+}
+
+func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return nil, leaseAccessConditions, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SetMetadataOptions provides a set of configurations for the Set Metadata on blob operation.
+type SetMetadataOptions struct {
+	AccessConditions *AccessConditions
+	CpkInfo          *CpkInfo
+	CpkScopeInfo     *CpkScopeInfo
+}
+
+func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CpkInfo,
+	*CpkScopeInfo, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method.
+type CreateSnapshotOptions struct {
+	Metadata         map[string]string
+	AccessConditions *AccessConditions
+	CpkInfo          *CpkInfo
+	CpkScopeInfo     *CpkScopeInfo
+}
+
+func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CpkInfo,
+	*generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+
+	return &generated.BlobClientCreateSnapshotOptions{
+		Metadata: o.Metadata,
+	}, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method.
+type StartCopyFromURLOptions struct {
+	// Specifies the date and time when the blob's immutability policy expires.
+	ImmutabilityPolicyExpiry *time.Time
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+	// Specifies whether a legal hold should be set on the blob.
+	LegalHold *bool
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTags map[string]string
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
+	Metadata map[string]string
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+	SealBlob *bool
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+	AccessConditions *AccessConditions
+}
+
+func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions,
+	*generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	basics := generated.BlobClientStartCopyFromURLOptions{
+		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.BlobTags),
+		Metadata:                 o.Metadata,
+		RehydratePriority:        o.RehydratePriority,
+		SealBlob:                 o.SealBlob,
+		Tier:                     o.Tier,
+		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
+		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
+		LegalHold:                o.LegalHold,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &basics, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method.
+type AbortCopyFromURLOptions struct {
+	LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil
+	}
+	return nil, o.LeaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SetTagsOptions contains the optional parameters for the Client.SetTags method.
+type SetTagsOptions struct {
+	// The version id parameter is an opaque DateTime value that, when present,
+	// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+	VersionID *string
+	// Optional header. Specifies the transactional CRC64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+	// Optional header. Specifies the transactional MD5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	AccessConditions *AccessConditions
+}
+
+func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	options := &generated.BlobClientSetTagsOptions{
+		TransactionalContentMD5:   o.TransactionalContentMD5,
+		TransactionalContentCRC64: o.TransactionalContentCRC64,
+		VersionID:                 o.VersionID,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return options, modifiedAccessConditions, leaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetTagsOptions contains the optional parameters for the Client.GetTags method.
+type GetTagsOptions struct {
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
+	Snapshot *string
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+
+	BlobAccessConditions *AccessConditions
+}
+
+func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	options := &generated.BlobClientGetTagsOptions{
+		Snapshot:  o.Snapshot,
+		VersionID: o.VersionID,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
+	return options, modifiedAccessConditions, leaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method.
+type CopyFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTags map[string]string
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+	// Specifies if a legal hold should be set on the blob.
+	LegalHold *bool
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]string
+	// Specify the MD5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+	BlobAccessConditions *AccessConditions
+}
+
+func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	options := &generated.BlobClientCopyFromURLOptions{
+		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.BlobTags),
+		CopySourceAuthorization:  o.CopySourceAuthorization,
+		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
+		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
+		LegalHold:                o.LegalHold,
+		Metadata:                 o.Metadata,
+		SourceContentMD5:         o.SourceContentMD5,
+		Tier:                     o.Tier,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
+	return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go
new file mode 100644
index 000000000..c13d332d7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go
@@ -0,0 +1,104 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package blob
+
+import (
+	"context"
+	"io"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// DownloadStreamResponse contains the response from the DownloadStream method.
+// To read from the stream, read from the Body field, or call the NewRetryReader method.
+type DownloadStreamResponse struct {
+	generated.BlobClientDownloadResponse
+	ObjectReplicationRules []ObjectReplicationPolicy
+
+	client   *Client
+	getInfo  httpGetterInfo
+	cpkInfo  *CpkInfo
+	cpkScope *CpkScopeInfo
+}
+
+// NewRetryReader constructs a new RetryReader stream for reading data. If a connection fails while
+// reading, it will make additional requests to reestablish a connection and continue reading.
+// Pass nil for options to accept the default options.
+// Callers of this method should not access the DownloadStreamResponse.Body field.
+func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader {
+	if options == nil {
+		options = &RetryReaderOptions{}
+	}
+
+	return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) {
+		accessConditions := &AccessConditions{
+			ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag},
+		}
+		options := DownloadStreamOptions{
+			Range:            getInfo.Range,
+			AccessConditions: accessConditions,
+			CpkInfo:          r.cpkInfo,
+			CpkScopeInfo:     r.cpkScope,
+		}
+		resp, err := r.client.DownloadStream(ctx, &options)
+		if err != nil {
+			return nil, err
+		}
+		return resp.Body, err
+	}, *options)
+}
+
+// DeleteResponse contains the response from method BlobClient.Delete.
+type DeleteResponse = generated.BlobClientDeleteResponse
+
+// UndeleteResponse contains the response from method BlobClient.Undelete.
+type UndeleteResponse = generated.BlobClientUndeleteResponse
+
+// SetTierResponse contains the response from method BlobClient.SetTier.
+type SetTierResponse = generated.BlobClientSetTierResponse
+
+// GetPropertiesResponse contains the response from method BlobClient.GetProperties.
+type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse
+
+// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
+type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse
+
+// SetMetadataResponse contains the response from method BlobClient.SetMetadata.
+type SetMetadataResponse = generated.BlobClientSetMetadataResponse
+
+// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot.
+type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse
+
+// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
+type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse
+
+// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL.
+type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse
+
+// SetTagsResponse contains the response from method BlobClient.SetTags.
+type SetTagsResponse = generated.BlobClientSetTagsResponse
+
+// GetTagsResponse contains the response from method BlobClient.GetTags.
+type GetTagsResponse = generated.BlobClientGetTagsResponse
+
+// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
+type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse + +// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse + +// BreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse + +// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse + +// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse + +// RenewLeaseResponse contains the response from method BlobClient.RenewLease. +type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go similarity index 57% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index 3179138f1..cc1b1365d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -2,57 +2,46 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package blob import ( "context" "io" "net" - "net/http" "strings" "sync" -) -const CountToEnd = 0 + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) // HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. -type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) // HTTPGetterInfo is passed to an HTTPGetter function passing it parameters // that should be used to make an HTTP GET request. -type HTTPGetterInfo struct { - // Offset specifies the start offset that should be used when - // creating the HTTP GET request's Range header - Offset int64 - - // Count specifies the count of bytes that should be used to calculate - // the end offset when creating the HTTP GET request's Range header - Count int64 +type httpGetterInfo struct { + Range HTTPRange // ETag specifies the resource's etag that should be used when creating // the HTTP GET request's If-Match header - ETag string + ETag *azcore.ETag } -// FailedReadNotifier is a function type that represents the notification function called when a read fails -type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) - -// RetryReaderOptions contains properties which can help to decide when to do retry. +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. type RetryReaderOptions struct { - // MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made - // while reading from a RetryReader. A value of zero means that no additional HTTP - // GET requests will be made. 
-	MaxRetryRequests int
-	doInjectError       bool
-	doInjectErrorRound  int
-	injectedError       error
+	// MaxRetries specifies the maximum number of attempts a failed read will be retried
+	// before producing an error.
+	// The default value is three.
+	MaxRetries int32
 
-	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
-	NotifyFailedRead FailedReadNotifier
+	// OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging.
+	OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool)
 
-	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
 	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
 	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
 	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
@@ -61,52 +50,59 @@ type RetryReaderOptions struct {
 	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
 	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
 	// which will be retried.
-	TreatEarlyCloseAsError bool
+	// The default value is false.
+	EarlyCloseAsError bool
 
-	CpkInfo      *CpkInfo
-	CpkScopeInfo *CpkScopeInfo
+	doInjectError      bool
+	doInjectErrorRound int32
+	injectedError      error
 }
 
-// retryReader implements io.ReaderCloser methods.
-// retryReader tries to read from response, and if there is retriable network error
+// RetryReader attempts to read from response, and if there is retriable network error
 // returned during reading, it will retry according to retry reader option through executing
 // user defined action with provided data to get a new response, and continue the overall reading process
 // through reading from the new response.
-type retryReader struct {
-	ctx             context.Context
-	info            HTTPGetterInfo
-	countWasBounded bool
-	o               RetryReaderOptions
-	getter          HTTPGetter
+// RetryReader implements the io.ReadCloser interface.
+type RetryReader struct {
+	ctx                context.Context
+	info               httpGetterInfo
+	retryReaderOptions RetryReaderOptions
+	getter             httpGetter
+	countWasBounded    bool
 
 	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
 	responseMu *sync.Mutex
-	response   *http.Response
+	response   io.ReadCloser
 }
 
-// NewRetryReader creates a retry reader.
-func NewRetryReader(ctx context.Context, initialResponse *http.Response,
-	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
-	return &retryReader{
-		ctx:             ctx,
-		getter:          getter,
-		info:            info,
-		countWasBounded: info.Count != CountToEnd,
-		response:        initialResponse,
-		responseMu:      &sync.Mutex{},
-		o:               o}
+// newRetryReader creates a retry reader.
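From the caller's side, the reworked options surface is easiest to see in use. A minimal sketch, assuming an already-constructed *blob.Client (the variable names and retry count here are illustrative only), that drains a blob through a retry-wrapped reader and logs each failed read:

package main

import (
	"context"
	"io"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// readWithRetries drains a blob via RetryReader; blobClient is assumed to be
// a valid *blob.Client created elsewhere.
func readWithRetries(ctx context.Context, blobClient *blob.Client) error {
	resp, err := blobClient.DownloadStream(ctx, nil)
	if err != nil {
		return err
	}
	// Wrap the body: up to 5 attempts per failed read, with diagnostic logging.
	rr := resp.NewRetryReader(ctx, &blob.RetryReaderOptions{
		MaxRetries: 5,
		OnFailedRead: func(failureCount int32, lastError error, rnge blob.HTTPRange, willRetry bool) {
			log.Printf("read failure %d (retrying=%v): %v", failureCount, willRetry, lastError)
		},
	})
	defer rr.Close()
	// Read to EOF; transient network errors are retried transparently.
	_, err = io.Copy(io.Discard, rr)
	return err
}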
+func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { + if o.MaxRetries < 1 { + o.MaxRetries = 3 + } + return &RetryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Range.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + retryReaderOptions: o, + } } -func (s *retryReader) setResponse(r *http.Response) { +// setResponse function +func (s *RetryReader) setResponse(r io.ReadCloser) { s.responseMu.Lock() defer s.responseMu.Unlock() s.response = r } -func (s *retryReader) Read(p []byte) (n int, err error) { - for try := 0; ; try++ { +// Read from retry reader +func (s *RetryReader) Read(p []byte) (n int, err error) { + for try := int32(0); ; try++ { //fmt.Println(try) // Comment out for debugging. - if s.countWasBounded && s.info.Count == CountToEnd { + if s.countWasBounded && s.info.Range.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF } @@ -123,12 +119,12 @@ func (s *retryReader) Read(p []byte) (n int, err error) { s.setResponse(newResponse) resp = newResponse } - n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) // Injection mechanism for testing. - if s.o.doInjectError && try == s.o.doInjectErrorRound { - if s.o.injectedError != nil { - err = s.o.injectedError + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError } else { err = &net.DNSError{IsTemporary: true} } @@ -136,9 +132,9 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // We successfully read data or end EOF. if err == nil || err == io.EOF { - s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future - if s.info.Count != CountToEnd { - s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future } return n, err // Return the return to the caller } @@ -147,15 +143,15 @@ func (s *retryReader) Read(p []byte) (n int, err error) { s.setResponse(nil) // Our stream is no longer good // Check the retry count and error code, and decide whether to retry. 
-			retriesExhausted := try >= s.o.MaxRetryRequests
+			retriesExhausted := try >= s.retryReaderOptions.MaxRetries
 			_, isNetError := err.(net.Error)
 			isUnexpectedEOF := err == io.ErrUnexpectedEOF
 			willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
 
 			// Notify, for logging purposes, of any failures
-			if s.o.NotifyFailedRead != nil {
+			if s.retryReaderOptions.OnFailedRead != nil {
 				failureCount := try + 1 // because try is zero-based
-				s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
+				s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry)
 			}
 
 			if willRetry {
@@ -174,21 +170,23 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
 // then there are two different types of error that may happen - either the one we check for here,
 // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
 // to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
-func (s *retryReader) wasRetryableEarlyClose(err error) bool {
-	if s.o.TreatEarlyCloseAsError {
+func (s *RetryReader) wasRetryableEarlyClose(err error) bool {
+	if s.retryReaderOptions.EarlyCloseAsError {
 		return false // user wants all early closes to be errors, and so not retryable
 	}
 	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
 	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
 }
 
+// ReadOnClosedBodyMessage is the error text produced when a response body is read after being closed.
 const ReadOnClosedBodyMessage = "read on closed response body"
 
-func (s *retryReader) Close() error {
+// Close closes the retry reader and its current response body.
+func (s *RetryReader) Close() error {
 	s.responseMu.Lock()
 	defer s.responseMu.Unlock()
-	if s.response != nil && s.response.Body != nil {
-		return s.response.Body.Close()
+	if s.response != nil {
+		return s.response.Close()
 	}
 	return nil
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go
new file mode 100644
index 000000000..7b4c8e248
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go
@@ -0,0 +1,79 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package blob
+
+import (
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+)
+
+// ObjectReplicationRules describes the status of a single object replication rule.
+type ObjectReplicationRules struct {
+	RuleId string
+	Status string
+}
+
+// ObjectReplicationPolicy is the deserialized form of a blob's object replication policy headers.
+type ObjectReplicationPolicy struct {
+	PolicyId *string
+	Rules    *[]ObjectReplicationRules
+}
+
+// deserializeORSPolicies is a utility function to deserialize ORS policies.
+func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) {
+	if policies == nil {
+		return nil
+	}
+	// For source blobs (blobs that have policy ids and rule ids applied to them),
+	// the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+	// The value of this header is the status of the replication.
+ orPolicyStatusHeader := make(map[string]string) + for key, value := range policies { + if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { + orPolicyStatusHeader[key] = value + } + } + + parsedResult := make(map[string][]ObjectReplicationRules) + for key, value := range orPolicyStatusHeader { + policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") + policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] + + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) + } + + for policyId, rules := range parsedResult { + objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ + PolicyId: &policyId, + Rules: &rules, + }) + } + return +} + +// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders +func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { + return HTTPHeaders{ + BlobContentType: resp.ContentType, + BlobContentEncoding: resp.ContentEncoding, + BlobContentLanguage: resp.ContentLanguage, + BlobContentDisposition: resp.ContentDisposition, + BlobCacheControl: resp.CacheControl, + BlobContentMD5: resp.ContentMD5, + } +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go new file mode 100644 index 000000000..6a1db8045 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package bloberror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
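A typical use of this helper is to branch on well-known service codes after a failed call. A short sketch, assuming a valid *blob.Client built elsewhere (the function name is illustrative):

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// getPropsIfExists treats "not found" as absence rather than failure.
// blobClient is assumed to be a valid *blob.Client.
func getPropsIfExists(ctx context.Context, blobClient *blob.Client) (*blob.GetPropertiesResponse, error) {
	resp, err := blobClient.GetProperties(ctx, nil)
	if err != nil {
		if bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound) {
			return nil, nil // the blob (or its container) simply doesn't exist
		}
		return nil, err // any other service or transport error is fatal
	}
	return &resp, nil
}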
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + BlobArchived Code = "BlobArchived" + BlobBeingRehydrated Code = "BlobBeingRehydrated" + BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + BlobNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + BlobOverwritten Code = "BlobOverwritten" + BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + ContainerAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidBlobTier Code = "InvalidBlobTier" + InvalidBlobType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + 
InvalidSourceBlobType Code = "InvalidSourceBlobType" + InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go 
similarity index 82% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index d5ccdfb40..16927ecf8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package blockblob import ( "bytes" @@ -13,19 +13,19 @@ import ( "encoding/binary" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" "io" "sync" "sync/atomic" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) // blockWriter provides methods to upload blocks that represent a file to a server and commit them. // This allows us to provide a local implementation that fakes the server for hermetic testing. type blockWriter interface { - StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) - CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) + StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) + CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) } // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. @@ -36,9 +36,9 @@ type blockWriter interface { // well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can // choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). // We can even provide a utility to dial this number in for customer networks to optimize their copies. -func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { - if err := o.defaults(); err != nil { - return BlockBlobCommitBlockListResponse{}, err +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (CommitBlockListResponse, error) { + if err := o.format(); err != nil { + return CommitBlockListResponse{}, err } ctx, cancel := context.WithCancel(ctx) @@ -47,7 +47,7 @@ func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o Uploa var err error generatedUuid, err := uuid.New() if err != nil { - return BlockBlobCommitBlockListResponse{}, err + return CommitBlockListResponse{}, err } cp := &copier{ @@ -68,12 +68,12 @@ func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o Uploa } // If the error is not EOF, then we have a problem. if err != nil && !errors.Is(err, io.EOF) { - return BlockBlobCommitBlockListResponse{}, err + return CommitBlockListResponse{}, err } // Close out our upload. if err := cp.close(); err != nil { - return BlockBlobCommitBlockListResponse{}, err + return CommitBlockListResponse{}, err } return cp.result, nil @@ -109,9 +109,10 @@ type copier struct { wg sync.WaitGroup // result holds the final result from blob storage after we have submitted all chunks. 
- result BlockBlobCommitBlockListResponse + result CommitBlockListResponse } +// copierChunk contains buffer type copierChunk struct { buffer []byte id string @@ -136,17 +137,17 @@ func (c *copier) sendChunk() error { return err } - buffer := c.o.TransferManager.Get() + buffer := c.o.transferManager.Get() if len(buffer) == 0 { - return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager") + return fmt.Errorf("transferManager returned a 0 size buffer, this is a bug in the manager") } n, err := io.ReadFull(c.reader, buffer) if n > 0 { - // Some data was read, schedule the write. + // Some data was read, schedule the Write. id := c.id.next() c.wg.Add(1) - c.o.TransferManager.Run( + c.o.transferManager.Run( func() { defer c.wg.Done() c.write(copierChunk{buffer: buffer, id: id, length: n}) @@ -154,7 +155,7 @@ func (c *copier) sendChunk() error { ) } else { // Return the unused buffer to the manager. - c.o.TransferManager.Put(buffer) + c.o.transferManager.Put(buffer) } if err == nil { @@ -172,16 +173,20 @@ func (c *copier) sendChunk() error { // write uploads a chunk to blob storage. func (c *copier) write(chunk copierChunk) { - defer c.o.TransferManager.Put(chunk.buffer) + defer c.o.transferManager.Put(chunk.buffer) if err := c.ctx.Err(); err != nil { return } stageBlockOptions := c.o.getStageBlockOptions() - _, err := c.to.StageBlock(c.ctx, chunk.id, internal.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) + _, err := c.to.StageBlock(c.ctx, chunk.id, shared.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) if err != nil { - c.errCh <- fmt.Errorf("write error: %w", err) - return + select { + case c.errCh <- err: + // failed to stage block, cancel the copy + default: + // don't block the goroutine if there's a pending error + } } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go new file mode 100644 index 000000000..4af33356e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -0,0 +1,481 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "io" + "os" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client defines a set of operations applicable to block blobs. +type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
+//   https://<storage-account>.blob.core.windows.net/container/blob.txt
+//   - cred - an Azure AD credential, typically obtained via the azidentity module
+//   - options - client options; pass nil to accept the default values
+func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a blob or with a shared access signature (SAS) token.
+//   - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt?<sas-token>
+//   - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+//   - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt
+//   - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+//   - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+//   - connectionString - a connection string for the desired storage account
+//   - containerName - the name of the container within the storage account
+//   - blobName - the name of the blob within the container
+//   - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (bb *Client) sharedKey() *blob.SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
+}
+
+func (bb *Client) generated() *generated.BlockBlobClient {
+	_, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
+	return blockBlob
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (bb *Client) URL() string {
+	return bb.generated().Endpoint()
+}
+
+// BlobClient returns the embedded blob client for this block blob client.
+func (bb *Client) BlobClient() *blob.Client {
+	blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
+	return (*blob.Client)(blobClient)
+}
+
+// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (bb *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := blob.ParseURL(bb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
+}
+
+// WithVersionID creates a new Client object identical to the source but with the specified version id.
+// Pass "" to remove the versionID returning a URL to the base blob.
+func (bb *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := blob.ParseURL(bb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
+}
+
+// Upload creates a new block blob or overwrites an existing block blob.
+// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
+// supported with Upload; the content of the existing blob is overwritten with the new content. To
+// perform a partial update of a block blob, use StageBlock and CommitBlockList.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
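A minimal end-to-end sketch of constructing a client from a connection string and invoking Upload; the connection string, container, and blob names below are placeholders, not real values:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// Placeholder connection string; real ones come from the portal or environment.
	connString := "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net"

	client, err := blockblob.NewClientFromConnectionString(connString, "mycontainer", "hello.txt", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Upload requires an io.ReadSeekCloser positioned at 0.
	body := streaming.NopCloser(strings.NewReader("Hello, blob!"))
	if _, err := client.Upload(context.TODO(), body, nil); err != nil {
		log.Fatal(err)
	}
}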
+func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return UploadResponse{}, err
+	}
+
+	opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
+
+	resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
+	return resp, err
+}
+
+// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
+func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return StageBlockResponse{}, err
+	}
+
+	opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
+
+	resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo)
+	return resp, err
+}
+
+// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// If count is CountToEnd (0), then data is read from the specified offset to the end.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
+func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
+	contentLength int64, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) {
+
+	stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
+
+	resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions,
+		cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
+
+	return resp, err
+}
+
+// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
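The stage-then-commit flow these docs describe looks like this from the caller's side; a sketch assuming an existing *blockblob.Client, where the block IDs are illustrative (they only need to be unique, base64-encoded, and of equal length):

package main

import (
	"context"
	"encoding/base64"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// uploadInTwoBlocks stages two blocks and commits them as one blob.
// bb is assumed to be a valid *blockblob.Client.
func uploadInTwoBlocks(ctx context.Context, bb *blockblob.Client) error {
	ids := []string{
		base64.StdEncoding.EncodeToString([]byte("block-0000")),
		base64.StdEncoding.EncodeToString([]byte("block-0001")),
	}
	parts := []string{"first half; ", "second half"}

	for i, id := range ids {
		body := streaming.NopCloser(strings.NewReader(parts[i]))
		if _, err := bb.StageBlock(ctx, id, body, nil); err != nil {
			return err
		}
	}
	// Nothing is visible on the blob until the block list is committed.
	_, err := bb.CommitBlockList(ctx, ids, nil)
	return err
}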
+func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) { + // this is a code smell in the generated code + blockIds := make([]*string, len(base64BlockIDs)) + for k, v := range base64BlockIDs { + blockIds[k] = to.Ptr(v) + } + + blockLookupList := generated.BlockLookupList{Latest: blockIds} + + var commitOptions *generated.BlockBlobClientCommitBlockListOptions + var headers *generated.BlobHTTPHeaders + var leaseAccess *blob.LeaseAccessConditions + var cpkInfo *generated.CpkInfo + var cpkScope *generated.CpkScopeInfo + var modifiedAccess *generated.ModifiedAccessConditions + + if options != nil { + commitOptions = &generated.BlockBlobClientCommitBlockListOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(options.Tags), + Metadata: options.Metadata, + RequestID: options.RequestID, + Tier: options.Tier, + Timeout: options.Timeout, + TransactionalContentCRC64: options.TransactionalContentCRC64, + TransactionalContentMD5: options.TransactionalContentMD5, + } + + headers = options.HTTPHeaders + leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) + cpkInfo = options.CpkInfo + cpkScope = options.CpkScopeInfo + } + + resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) + return resp, err +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. +func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) { + o, lac, mac := options.format() + + resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac) + + return resp, err +} + +// Redeclared APIs ----- Copy over to Append blob and Page blob as well. + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return bb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return bb.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. 
+func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return bb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return bb.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return bb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return bb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return bb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return bb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return bb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
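As a caller-side sketch of the synchronous copy path combined with tagging: the source URL below is a placeholder and must be readable by the service (public, or carrying a SAS), and bb is an assumed *blockblob.Client:

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// copyAndTag copies srcURL into the destination blob, tagging it in the same call,
// then reads the tags back. bb and srcURL are illustrative inputs.
func copyAndTag(ctx context.Context, bb *blockblob.Client, srcURL string) error {
	_, err := bb.CopyFromURL(ctx, srcURL, &blob.CopyFromURLOptions{
		BlobTags: map[string]string{"source": "import", "stage": "raw"},
	})
	if err != nil {
		return err
	}
	// CopyFromURL is synchronous, so the tags are queryable as soon as it returns.
	resp, err := bb.GetTags(ctx, nil)
	if err != nil {
		return err
	}
	_ = resp // the response carries the blob's tag set
	return nil
}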
+func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return bb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in blocks to a block blob. +func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, readerSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { + if o.BlockSize == 0 { + // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error + if readerSize > MaxStageBlockBytes*MaxBlocks { + return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if readerSize <= MaxUploadBlobBytes { + o.BlockSize = MaxUploadBlobBytes // Default if unspecified + } else { + if remainder := readerSize % MaxBlocks; remainder > 0 { + // ensure readerSize is a multiple of MaxBlocks + readerSize += (MaxBlocks - remainder) + } + o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = blob.DefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). + } + } + + if readerSize <= MaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions) + + return toUploadReaderAtResponseFromUploadResponse(resp), err + } + + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + if numBlocks > MaxBlocks { + // prevent any math bugs from attempting to upload too many blocks which will always fail + return uploadFromReaderResponse{}, errors.New("block limit exceeded") + } + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: readerSize, + ChunkSize: o.BlockSize, + Concurrency: o.Concurrency, + Operation: func(offset int64, count int64, ctx context.Context) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = streaming.NewRequestProgress(shared.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. 
+			generatedUuid, err := uuid.New()
+			if err != nil {
+				return err
+			}
+			blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String()))
+			stageBlockOptions := o.getStageBlockOptions()
+			_, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions)
+			return err
+		},
+	})
+	if err != nil {
+		return uploadFromReaderResponse{}, err
+	}
+	// All put blocks were successful, call Put Block List to finalize the blob
+	commitBlockListOptions := o.getCommitBlockListOptions()
+	resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions)
+
+	return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err
+}
+
+// UploadBuffer uploads a buffer in blocks to a block blob.
+func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
+	uploadOptions := uploadFromReaderOptions{}
+	if o != nil {
+		uploadOptions = *o
+	}
+	return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions)
+}
+
+// UploadFile uploads a file in blocks to a block blob.
+func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
+	stat, err := file.Stat()
+	if err != nil {
+		return uploadFromReaderResponse{}, err
+	}
+	uploadOptions := uploadFromReaderOptions{}
+	if o != nil {
+		uploadOptions = *o
+	}
+	return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions)
+}
+
+// UploadStream copies the data held in the io.Reader to the block blob.
+// A Context deadline or cancellation will cause this to error.
+func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
+	if o == nil {
+		o = &UploadStreamOptions{}
+	}
+
+	if err := o.format(); err != nil {
+		return CommitBlockListResponse{}, err
+	}
+
+	// If we used the default manager, we need to close it.
+	if o.transferMangerNotSet {
+		defer o.transferManager.Close()
+	}
+
+	result, err := copyFromReader(ctx, body, bb, *o)
+	if err != nil {
+		return CommitBlockListResponse{}, err
+	}
+
+	return result, nil
+}
+
+// Concurrent Download Functions -----------------------------------------------------------------------------------------
+
+// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
+	return bb.BlobClient().DownloadStream(ctx, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
+	return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the blob's size.
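The streaming helpers compose into a simple round trip. A sketch assuming a valid *blockblob.Client, with block sizes and buffer counts left at their defaults (the payload is illustrative):

package main

import (
	"context"
	"io"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// roundTrip streams data up to a block blob and reads it back down.
// bb is assumed to be a valid *blockblob.Client.
func roundTrip(ctx context.Context, bb *blockblob.Client) error {
	// UploadStream chunks the reader into staged blocks and commits them.
	if _, err := bb.UploadStream(ctx, strings.NewReader("streamed payload"), &blockblob.UploadStreamOptions{}); err != nil {
		return err
	}

	resp, err := bb.DownloadStream(ctx, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Small blob, so buffering the whole body in memory is fine here.
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	_ = data
	return nil
}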
+func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return bb.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go new file mode 100644 index 000000000..46de1ede7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +// nolint +const ( + // CountToEnd specifies the end of the file + CountToEnd = 0 + + _1MiB = 1024 * 1024 + + // MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB + + // MaxBlocks indicates the maximum number of blocks allowed in a block blob. + MaxBlocks = 50000 +) + +// BlockListType defines values for BlockListType +type BlockListType = generated.BlockListType + +const ( + BlockListTypeCommitted BlockListType = generated.BlockListTypeCommitted + BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted + BlockListTypeAll BlockListType = generated.BlockListTypeAll +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return generated.PossibleBlockListTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go new file mode 100644 index 000000000..1e9314364 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -0,0 +1,311 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block = generated.Block + +// BlockList - type of blocklist (committed/uncommitted) +type BlockList = generated.BlockList + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// UploadOptions contains the optional parameters for the Client.Upload method. +type UploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. 
Indicates the tier to be set on the blob.
+	Tier *blob.AccessTier
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	HTTPHeaders      *blob.HTTPHeaders
+	CpkInfo          *blob.CpkInfo
+	CpkScopeInfo     *blob.CpkScopeInfo
+	AccessConditions *blob.AccessConditions
+}
+
+func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions,
+	*generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil
+	}
+
+	basics := generated.BlockBlobClientUploadOptions{
+		BlobTagsString:          shared.SerializeBlobTagsToStrPtr(o.Tags),
+		Metadata:                o.Metadata,
+		Tier:                    o.Tier,
+		TransactionalContentMD5: o.TransactionalContentMD5,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// StageBlockOptions contains the optional parameters for the Client.StageBlock method.
+type StageBlockOptions struct {
+	CpkInfo *blob.CpkInfo
+
+	CpkScopeInfo *blob.CpkScopeInfo
+
+	LeaseAccessConditions *blob.LeaseAccessConditions
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// format converts StageBlockOptions into the generated layer's options and lease/CPK parameters.
+func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	return &generated.BlockBlobClientStageBlockOptions{
+		TransactionalContentCRC64: o.TransactionalContentCRC64,
+		TransactionalContentMD5:   o.TransactionalContentMD5,
+	}, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method.
+type StageBlockFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	LeaseAccessConditions *blob.LeaseAccessConditions
+
+	SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentCRC64 []byte
+
+	// Range specifies a range of bytes. The default value is all bytes.
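+	// For example (illustrative values, an editorial addition), Range: blob.HTTPRange{Offset: 0, Count: 4 * 1024 * 1024}
+	// stages only the first 4 MiB of the source blob.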
+	Range blob.HTTPRange
+
+	CpkInfo *blob.CpkInfo
+
+	CpkScopeInfo *blob.CpkScopeInfo
+}
+
+func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil
+	}
+
+	options := &generated.BlockBlobClientStageBlockFromURLOptions{
+		CopySourceAuthorization: o.CopySourceAuthorization,
+		SourceContentMD5:        o.SourceContentMD5,
+		SourceContentcrc64:      o.SourceContentCRC64,
+		SourceRange:             exported.FormatHTTPRange(o.Range),
+	}
+
+	return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// CommitBlockListOptions contains the optional parameters for the Client.CommitBlockList method.
+type CommitBlockListOptions struct {
+	Tags                      map[string]string
+	Metadata                  map[string]string
+	RequestID                 *string
+	Tier                      *blob.AccessTier
+	Timeout                   *int32
+	TransactionalContentCRC64 []byte
+	TransactionalContentMD5   []byte
+	HTTPHeaders               *blob.HTTPHeaders
+	CpkInfo                   *blob.CpkInfo
+	CpkScopeInfo              *blob.CpkScopeInfo
+	AccessConditions          *blob.AccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method.
+type GetBlockListOptions struct {
+	Snapshot         *string
+	AccessConditions *blob.AccessConditions
+}
+
+func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions
+}
+
+// ------------------------------------------------------------
+
+// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions.
+type uploadFromReaderOptions struct {
+	// BlockSize specifies the block size to use; the maximum is MaxStageBlockBytes. If zero,
+	// a block size is computed from the reader's size (see uploadFromReader).
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient.
+	// Note that the progress reporting is not always increasing; it can go down when retrying a request.
+	Progress func(bytesTransferred int64)
+
+	// HTTPHeaders indicates the HTTP headers to be associated with the blob.
+	HTTPHeaders *blob.HTTPHeaders
+
+	// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
+	Metadata map[string]string
+
+	// AccessConditions indicates the access conditions for the block blob.
+	AccessConditions *blob.AccessConditions
+
+	// AccessTier indicates the tier of the blob.
+	AccessTier *blob.AccessTier
+
+	// Tags are the blob tags to be associated with the blob.
+	Tags map[string]string
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	CpkInfo      *blob.CpkInfo
+	CpkScopeInfo *blob.CpkScopeInfo
+
+	// Concurrency indicates the maximum number of blocks to upload in parallel (0=default)
+	Concurrency uint16
+
+	// Optional header. Specifies the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 *[]byte
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 *[]byte
+}
+
+// UploadBufferOptions provides a set of configurations for the UploadBuffer operation.
+type UploadBufferOptions = uploadFromReaderOptions
+
+// UploadFileOptions provides a set of configurations for the UploadFile operation.
+type UploadFileOptions = uploadFromReaderOptions
+
+func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions {
+	leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &StageBlockOptions{
+		CpkInfo:               o.CpkInfo,
+		CpkScopeInfo:          o.CpkScopeInfo,
+		LeaseAccessConditions: leaseAccessConditions,
+	}
+}
+
+func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions {
+	return &UploadOptions{
+		Tags:             o.Tags,
+		Metadata:         o.Metadata,
+		Tier:             o.AccessTier,
+		HTTPHeaders:      o.HTTPHeaders,
+		AccessConditions: o.AccessConditions,
+		CpkInfo:          o.CpkInfo,
+		CpkScopeInfo:     o.CpkScopeInfo,
+	}
+}
+
+func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions {
+	return &CommitBlockListOptions{
+		Tags:         o.Tags,
+		Metadata:     o.Metadata,
+		Tier:         o.AccessTier,
+		HTTPHeaders:  o.HTTPHeaders,
+		CpkInfo:      o.CpkInfo,
+		CpkScopeInfo: o.CpkScopeInfo,
+	}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// UploadStreamOptions provides a set of configurations for the UploadStream operation.
+type UploadStreamOptions struct {
+	// transferManager provides a transferManager that controls buffer allocation/reuse and
+	// concurrency. This overrides BlockSize and Concurrency if set.
+	transferManager      shared.TransferManager
+	transferMangerNotSet bool
+
+	// BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB.
+	BlockSize int
+
+	// Concurrency defines the number of concurrent uploads to be performed to upload the file.
+	// Each concurrent upload will create a buffer of size BlockSize. The default value is one.
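+	// For example (an illustrative sizing, not a recommendation): BlockSize: 8 * 1024 * 1024
+	// with Concurrency: 4 keeps at most 32 MiB of data buffered at any one time.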
+ Concurrency int + + HTTPHeaders *blob.HTTPHeaders + Metadata map[string]string + AccessConditions *blob.AccessConditions + AccessTier *blob.AccessTier + Tags map[string]string + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo +} + +func (u *UploadStreamOptions) format() error { + if u == nil || u.transferManager != nil { + return nil + } + + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.BlockSize < _1MiB { + u.BlockSize = _1MiB + } + + var err error + u.transferManager, err = shared.NewStaticBuffer(u.BlockSize, u.Concurrency) + if err != nil { + return fmt.Errorf("bug: default transfer manager could not be created: %s", err) + } + u.transferMangerNotSet = true + return nil +} + +func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) + return &StageBlockOptions{ + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { + options := &CommitBlockListOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + AccessConditions: u.AccessConditions, + } + + return options +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go new file mode 100644 index 000000000..dab1873b1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -0,0 +1,111 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// UploadResponse contains the response from method Client.Upload. +type UploadResponse = generated.BlockBlobClientUploadResponse + +// StageBlockResponse contains the response from method Client.StageBlock. +type StageBlockResponse = generated.BlockBlobClientStageBlockResponse + +// CommitBlockListResponse contains the response from method Client.CommitBlockList. +type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse + +// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL. +type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse + +// GetBlockListResponse contains the response from method Client.GetBlockList. +type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse + +// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type uploadFromReaderResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+
+	// VersionID contains the information returned from the x-ms-version-id header response.
+	VersionID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	// It is part of the response only when the uploaded data is >= MaxUploadBlobBytes (256 MiB).
+	ContentCRC64 []byte
+}
+
+func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse {
+	return uploadFromReaderResponse{
+		ClientRequestID:     resp.ClientRequestID,
+		ContentMD5:          resp.ContentMD5,
+		Date:                resp.Date,
+		ETag:                resp.ETag,
+		EncryptionKeySHA256: resp.EncryptionKeySHA256,
+		EncryptionScope:     resp.EncryptionScope,
+		IsServerEncrypted:   resp.IsServerEncrypted,
+		LastModified:        resp.LastModified,
+		RequestID:           resp.RequestID,
+		Version:             resp.Version,
+		VersionID:           resp.VersionID,
+	}
+}
+
+func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse {
+	return uploadFromReaderResponse{
+		ClientRequestID:     resp.ClientRequestID,
+		ContentMD5:          resp.ContentMD5,
+		Date:                resp.Date,
+		ETag:                resp.ETag,
+		EncryptionKeySHA256: resp.EncryptionKeySHA256,
+		EncryptionScope:     resp.EncryptionScope,
+		IsServerEncrypted:   resp.IsServerEncrypted,
+		LastModified:        resp.LastModified,
+		RequestID:           resp.RequestID,
+		Version:             resp.Version,
+		VersionID:           resp.VersionID,
+		ContentCRC64:        resp.XMSContentCRC64,
+	}
+}
+
+// UploadFileResponse contains the response from method Client.UploadFile.
+type UploadFileResponse = uploadFromReaderResponse
+
+// UploadBufferResponse contains the response from method Client.UploadBuffer.
+type UploadBufferResponse = uploadFromReaderResponse
+
+// UploadStreamResponse contains the response from method Client.UploadStream.
+type UploadStreamResponse = CommitBlockListResponse
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
new file mode 100644
index 000000000..4cc9a51bb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
@@ -0,0 +1,171 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azblob
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+)
+
+// ClientOptions contains the optional parameters when creating a Client.
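+//
+// A hedged sketch of tuning the embedded azcore options (the values and variable names are illustrative):
+//
+//	opts := azblob.ClientOptions{}
+//	opts.Retry.MaxRetries = 5
+//	client, err := azblob.NewClientWithNoCredential("https://<account>.blob.core.windows.net/", &opts)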
+type ClientOptions struct {
+	azcore.ClientOptions
+}
+
+// Client represents a URL to an Azure Storage account, allowing you to manipulate its containers and blobs.
+type Client struct {
+	svc *service.Client
+}
+
+// NewClient creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	var clientOptions *service.ClientOptions
+	if options != nil {
+		clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions}
+	}
+	svcClient, err := service.NewClient(serviceURL, cred, clientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// Use this to access a storage account anonymously or with a shared access signature (SAS) token.
+// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
+	var clientOptions *service.ClientOptions
+	if options != nil {
+		clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions}
+	}
+	svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
+// - cred - a SharedKeyCredential created with the matching storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
+	if options == nil {
+		options = &ClientOptions{}
+	}
+	svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options))
+	if err != nil {
+		return nil, err
+	}
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (c *Client) URL() string {
+	return c.svc.URL()
+}
+
+// CreateContainer is a lifecycle method that creates a new container under the specified account.
+// If a container with the same name already exists, a ResourceExistsError will be raised.
+// The response contains information about the newly created container.
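+//
+// A minimal sketch (the container name and client variable are illustrative):
+//
+//	_, err := client.CreateContainer(context.TODO(), "mycontainer", nil)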
+func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) {
+	return c.svc.CreateContainer(ctx, containerName, o)
+}
+
+// DeleteContainer is a lifecycle method that marks the specified container for deletion.
+// The container and any blobs contained within it are later deleted during garbage collection.
+// If the container is not found, a ResourceNotFoundError will be raised.
+func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) {
+	return c.svc.DeleteContainer(ctx, containerName, o)
+}
+
+// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o)
+}
+
+// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
+	return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o)
+}
+
+// NewListContainersPager returns a pager of the containers under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
+func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] {
+	return c.svc.NewListContainersPager(o)
+}
+
+// UploadBuffer uploads a buffer in blocks to a block blob.
+func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadBuffer(ctx, buffer, o)
+}
+
+// UploadFile uploads a file in blocks to a block blob.
+func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadFile(ctx, file, o)
+}
+
+// UploadStream copies the data from the given io.Reader to the named block blob.
+// A Context deadline or cancellation will cause this to error.
+func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadStream(ctx, body, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
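+//
+// A hedged sketch (assumes the blob's size is known, e.g. from a prior properties call; names are illustrative):
+//
+//	buf := make([]byte, blobSize)
+//	n, err := client.DownloadBuffer(ctx, "mycontainer", "myblob", buf, nil)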
+func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) {
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the downloaded size.
+func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) {
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadFile(ctx, file, o)
+}
+
+// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
+	o = shared.CopyOptions(o)
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadStream(ctx, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
new file mode 100644
index 000000000..560e151d5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
@@ -0,0 +1,36 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azblob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+)
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+type SharedKeyCredential = exported.SharedKeyCredential
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+	return exported.NewSharedKeyCredential(accountName, accountKey)
+}
+
+// URLParts object represents the components that make up an Azure Storage Container/Blob URL.
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type URLParts = sas.URLParts
+
+// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other
+// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object.
+func ParseURL(u string) (URLParts, error) {
+	return sas.ParseURL(u)
+}
+
+// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
+// with a non-zero offset and a zero count indicates the range from offset to the resource's end.
+type HTTPRange = exported.HTTPRange
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go
deleted file mode 100644
index c5d501c66..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go
+++ /dev/null
@@ -1,39 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -type connection struct { - u string - p runtime.Pipeline -} - -// newConnection creates an instance of the connection type with the specified endpoint. -// Pass nil to accept the default options; this is the same as passing a zero-value options. -func newConnection(endpoint string, options *azcore.ClientOptions) *connection { - cp := azcore.ClientOptions{} - if options != nil { - cp = *options - } - return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)} -} - -// Endpoint returns the connection's endpoint. -func (c *connection) Endpoint() string { - return c.u -} - -// Pipeline returns the connection's pipeline. -func (c *connection) Pipeline() runtime.Pipeline { - return c.p -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go index c1c336ed4..d4c3262d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -2,45 +2,36 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. package azblob -var SASVersion = "2019-12-12" - -//nolint -const ( - // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. - BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) - // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. - BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType - // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. - BlockBlobMaxBlocks = 50000 +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) - // PageBlobPageBytes indicates the number of bytes in a page (512). - PageBlobPageBytes = 512 +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. 
+func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} - // BlobDefaultDownloadBlockSize is default block size - BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB -) +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType const ( - headerAuthorization = "Authorization" - headerXmsDate = "x-ms-date" - headerContentLength = "Content-Length" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly ) -const ( - tokenScope = "https://storage.azure.com/.default" -) +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go new file mode 100644 index 000000000..e4afad9f8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -0,0 +1,334 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "context" + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type Client base.Client[generated.ContainerClient] + +// NewClient creates an instance of Client with the specified values. +// - containerURL - the URL of the container e.g. 
https://<account>.blob.core.windows.net/container
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// Use this to access a container anonymously or with a shared access signature (SAS) token.
+// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container
+// - cred - a SharedKeyCredential created with the matching container's storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - containerName - the name of the container within the storage account
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (c *Client) generated() *generated.ContainerClient {
+	return base.InnerClient((*base.Client[generated.ContainerClient])(c))
+}
+
+func (c *Client) sharedKey() *SharedKeyCredential {
+	return base.SharedKey((*base.Client[generated.ContainerClient])(c))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (c *Client) URL() string {
+	return c.generated().Endpoint()
+}
+
+// NewBlobClient creates a new blob.Client object by concatenating blobName to the end of
+// this Client's URL. The new blob.Client uses the same request policy pipeline as this Client.
+func (c *Client) NewBlobClient(blobName string) *blob.Client {
+	blobURL := runtime.JoinPaths(c.URL(), blobName)
+	return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
+}
+
+// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of
+// this Client's URL. The new appendblob.Client uses the same request policy pipeline as this Client.
+func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
+	blobURL := runtime.JoinPaths(c.URL(), blobName)
+	return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
+}
+
+// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of
+// this Client's URL. The new blockblob.Client uses the same request policy pipeline as this Client.
+func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
+	blobURL := runtime.JoinPaths(c.URL(), blobName)
+	return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
+}
+
+// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of
+// this Client's URL. The new pageblob.Client uses the same request policy pipeline as this Client.
+func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client {
+	blobURL := runtime.JoinPaths(c.URL(), blobName)
+	return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
+}
+
+// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
+func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) {
+	var opts *generated.ContainerClientCreateOptions
+	var cpkScopes *generated.ContainerCpkScopeInfo
+	if options != nil {
+		opts = &generated.ContainerClientCreateOptions{
+			Access:   options.Access,
+			Metadata: options.Metadata,
+		}
+		cpkScopes = options.CpkScopeInfo
+	}
+	resp, err := c.generated().Create(ctx, opts, cpkScopes)
+
+	return resp, err
+}
+
+// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
+func (c *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) {
+	opts, leaseAccessConditions, modifiedAccessConditions := options.format()
+	resp, err := c.generated().Delete(ctx, opts, leaseAccessConditions, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// Restore restores the contents and properties of a soft-deleted container to the specified container.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/restore-container.
+func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, options *RestoreOptions) (RestoreResponse, error) {
+	urlParts, err := blob.ParseURL(c.URL())
+	if err != nil {
+		return RestoreResponse{}, err
+	}
+
+	opts := &generated.ContainerClientRestoreOptions{
+		DeletedContainerName:    &urlParts.ContainerName,
+		DeletedContainerVersion: &deletedContainerVersion,
+	}
+	resp, err := c.generated().Restore(ctx, opts)
+
+	return resp, err
+}
+
+// GetProperties returns the container's properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
+func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) {
+	// NOTE: GetProperties returns the metadata AND the properties, so a separate
+	// GetMetadata method is not exposed; this keeps the API surface small.
+	opts, leaseAccessConditions := o.format()
+
+	resp, err := c.generated().GetProperties(ctx, opts, leaseAccessConditions)
+	return resp, err
+}
+
+// SetMetadata sets the container's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
+func (c *Client) SetMetadata(ctx context.Context, o *SetMetadataOptions) (SetMetadataResponse, error) {
+	metadataOptions, lac, mac := o.format()
+	resp, err := c.generated().SetMetadata(ctx, metadataOptions, lac, mac)
+
+	return resp, err
+}
+
+// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
+func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) {
+	options, ac := o.format()
+	resp, err := c.generated().GetAccessPolicy(ctx, options, ac)
+	return resp, err
+}
+
+// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
+func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) {
+	accessPolicy, mac, lac := o.format()
+	resp, err := c.generated().SetAccessPolicy(ctx, containerACL, accessPolicy, mac, lac)
+	return resp, err
+}
+
+// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
+	listOptions := generated.ContainerClientListBlobFlatSegmentOptions{}
+	if o != nil {
+		listOptions.Include = o.Include.format()
+		listOptions.Marker = o.Marker
+		listOptions.Maxresults = o.MaxResults
+		listOptions.Prefix = o.Prefix
+	}
+	return runtime.NewPager(runtime.PagingHandler[ListBlobsFlatResponse]{
+		More: func(page ListBlobsFlatResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *ListBlobsFlatResponse) (ListBlobsFlatResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions)
+			} else {
+				listOptions.Marker = page.NextMarker
+				req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions)
+			}
+			if err != nil {
+				return ListBlobsFlatResponse{}, err
+			}
+			resp, err := c.generated().Pipeline().Do(req)
+			if err != nil {
+				return ListBlobsFlatResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				// TODO: storage error?
+				return ListBlobsFlatResponse{}, runtime.NewResponseError(resp)
+			}
+			return c.generated().ListBlobFlatSegmentHandleResponse(resp)
+		},
+	})
+}
+
+// NewListBlobsHierarchyPager returns a pager for blobs starting from the specified Marker, using the given
+// delimiter to group blobs that share a common prefix into a virtual directory hierarchy. Use an empty Marker
+// to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
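+//
+// A minimal sketch (the delimiter and error handling are illustrative):
+//
+//	pager := client.NewListBlobsHierarchyPager("/", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			break // handle the error appropriately
+//		}
+//		for _, prefix := range page.Segment.BlobPrefixes {
+//			_ = prefix.Name // a virtual directory
+//		}
+//		for _, b := range page.Segment.BlobItems {
+//			_ = b.Name
+//		}
+//	}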
+func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] {
+	listOptions := o.format()
+	return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{
+		More: func(page ListBlobsHierarchyResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *ListBlobsHierarchyResponse) (ListBlobsHierarchyResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions)
+			} else {
+				listOptions.Marker = page.NextMarker
+				req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions)
+			}
+			if err != nil {
+				return ListBlobsHierarchyResponse{}, err
+			}
+			resp, err := c.generated().Pipeline().Do(req)
+			if err != nil {
+				return ListBlobsHierarchyResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return ListBlobsHierarchyResponse{}, runtime.NewResponseError(resp)
+			}
+			return c.generated().ListBlobHierarchySegmentHandleResponse(resp)
+		},
+	})
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the container pointed to by this client.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time, expiry time.Time) (string, error) {
+	if c.sharedKey() == nil {
+		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
+	}
+
+	urlParts, err := blob.ParseURL(c.URL())
+	if err != nil {
+		return "", err
+	}
+
+	// Containers do not have snapshots, nor versions.
+	qps, err := sas.BlobSignatureValues{
+		Version:       sas.Version,
+		Protocol:      sas.ProtocolHTTPS,
+		ContainerName: urlParts.ContainerName,
+		Permissions:   permissions.String(),
+		StartTime:     start.UTC(),
+		ExpiryTime:    expiry.UTC(),
+	}.SignWithSharedKey(c.sharedKey())
+	if err != nil {
+		return "", err
+	}
+
+	endpoint := c.URL() + "?" + qps.Encode()
+
+	return endpoint, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go
new file mode 100644
index 000000000..ce6e67b5e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go
@@ -0,0 +1,166 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package container
+
+import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+
+// PublicAccessType defines values for AccessType - private (default) or blob or container
+type PublicAccessType = generated.PublicAccessType
+
+const (
+	PublicAccessTypeBlob      PublicAccessType = generated.PublicAccessTypeBlob
+	PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer
+)
+
+// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type.
+func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// LeaseStatusType defines values for LeaseStatusType +type LeaseStatusType = generated.LeaseStatusType + +const ( + LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked + LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return generated.PossibleLeaseStatusTypeValues() +} + +// LeaseDurationType defines values for LeaseDurationType +type LeaseDurationType = generated.LeaseDurationType + +const ( + LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite + LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// LeaseStateType defines values for LeaseStateType +type LeaseStateType = generated.LeaseStateType + +const ( + LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable + LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased + LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired + LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking + LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. 
+func PossibleLeaseStateTypeValues() []LeaseStateType { + return generated.PossibleLeaseStateTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go new file mode 100644 index 000000000..eb65d5fc3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. 
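+//
+// A hedged usage sketch (the account name, key, and URL are placeholders):
+//
+//	cred, err := container.NewSharedKeyCredential("<account>", "<base64-encoded key>")
+//	if err != nil {
+//		// handle the error
+//	}
+//	client, err := container.NewClientWithSharedKeyCredential(
+//		"https://<account>.blob.core.windows.net/<container>", cred, nil)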
+type SharedKeyCredential = exported.SharedKeyCredential
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+	return exported.NewSharedKeyCredential(accountName, accountKey)
+}
+
+// Type Declarations ---------------------------------------------------------------------------------------------------
+
+// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method.
+type CpkScopeInfo = generated.ContainerCpkScopeInfo
+
+// BlobProperties - Properties of a blob
+type BlobProperties = generated.BlobPropertiesInternal
+
+// BlobItem - An Azure Storage blob
+type BlobItem = generated.BlobItemInternal
+
+// AccessConditions identifies container-specific access conditions which you optionally set.
+type AccessConditions = exported.ContainerAccessConditions
+
+// LeaseAccessConditions contains optional parameters to access a leased entity.
+type LeaseAccessConditions = exported.LeaseAccessConditions
+
+// ModifiedAccessConditions contains a group of parameters for specifying access conditions.
+type ModifiedAccessConditions = exported.ModifiedAccessConditions
+
+// AccessPolicy - An access policy.
+type AccessPolicy = generated.AccessPolicy
+
+// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
+// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
+type AccessPolicyPermission = exported.AccessPolicyPermission
+
+// SignedIdentifier - A signed identifier.
+type SignedIdentifier = generated.SignedIdentifier
+
+// Request Model Declaration -------------------------------------------------------------------------------------------
+
+// CreateOptions contains the optional parameters for the Client.Create method.
+type CreateOptions struct {
+	// Specifies whether data in the container may be accessed publicly and the level of access
+	Access *PublicAccessType
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob.
+	Metadata map[string]string
+
+	// Optional. Specifies the encryption scope settings to set on the container.
+	CpkScopeInfo *CpkScopeInfo
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DeleteOptions contains the optional parameters for the Client.Delete method.
+type DeleteOptions struct {
+	AccessConditions *AccessConditions
+}
+
+func (o *DeleteOptions) format() (*generated.ContainerClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatContainerAccessConditions(o.AccessConditions)
+	return nil, leaseAccessConditions, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// RestoreOptions contains the optional parameters for the Client.Restore method.
+type RestoreOptions struct {
+	// placeholder for future options
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method.
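+// A hedged usage sketch (assumes an existing Client `client`, a context
+// `ctx`, and a lease ID `leaseID *string`):
+//
+//	resp, err := client.GetProperties(ctx, &GetPropertiesOptions{
+//		LeaseAccessConditions: &LeaseAccessConditions{LeaseID: leaseID},
+//	})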
+type GetPropertiesOptions struct {
+	LeaseAccessConditions *LeaseAccessConditions
+}
+
+// format converts GetPropertiesOptions into its generated counterpart.
+func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetPropertiesOptions, *generated.LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil
+	}
+
+	return nil, o.LeaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ListBlobsInclude indicates what additional information the service should return with each blob.
+type ListBlobsInclude struct {
+	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool
+}
+
+func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem {
+	if reflect.ValueOf(l).IsZero() {
+		return nil
+	}
+
+	include := []generated.ListBlobsIncludeItem{}
+
+	if l.Copy {
+		include = append(include, generated.ListBlobsIncludeItemCopy)
+	}
+	if l.Deleted {
+		include = append(include, generated.ListBlobsIncludeItemDeleted)
+	}
+	if l.DeletedWithVersions {
+		include = append(include, generated.ListBlobsIncludeItemDeletedwithversions)
+	}
+	if l.ImmutabilityPolicy {
+		include = append(include, generated.ListBlobsIncludeItemImmutabilitypolicy)
+	}
+	if l.LegalHold {
+		include = append(include, generated.ListBlobsIncludeItemLegalhold)
+	}
+	if l.Metadata {
+		include = append(include, generated.ListBlobsIncludeItemMetadata)
+	}
+	if l.Snapshots {
+		include = append(include, generated.ListBlobsIncludeItemSnapshots)
+	}
+	if l.Tags {
+		include = append(include, generated.ListBlobsIncludeItemTags)
+	}
+	if l.UncommittedBlobs {
+		include = append(include, generated.ListBlobsIncludeItemUncommittedblobs)
+	}
+	if l.Versions {
+		include = append(include, generated.ListBlobsIncludeItemVersions)
+	}
+
+	return include
+}
+
+// ListBlobsFlatOptions contains the optional parameters for the Client.NewListBlobsFlatPager method.
+type ListBlobsFlatOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include ListBlobsInclude
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	MaxResults *int32
+	// Filters the results to return only blobs whose names begin with the specified prefix.
+	Prefix *string
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ListBlobsHierarchyOptions provides a set of configurations for the Client.NewListBlobsHierarchyPager method.
+type ListBlobsHierarchyOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include ListBlobsInclude
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	MaxResults *int32
+	// Filters the results to return only blobs whose names begin with the specified prefix.
+	Prefix *string
+}
+
+// format converts ListBlobsHierarchyOptions into its generated counterpart.
+func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHierarchySegmentOptions {
+	if o == nil {
+		return generated.ContainerClientListBlobHierarchySegmentOptions{}
+	}
+
+	return generated.ContainerClientListBlobHierarchySegmentOptions{
+		Include:    o.Include.format(),
+		Marker:     o.Marker,
+		Maxresults: o.MaxResults,
+		Prefix:     o.Prefix,
+	}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method.
+type SetMetadataOptions struct {
+	Metadata                 map[string]string
+	LeaseAccessConditions    *LeaseAccessConditions
+	ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *SetMetadataOptions) format() (*generated.ContainerClientSetMetadataOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	return &generated.ContainerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method.
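+// A hedged usage sketch (assumes an existing Client `client` and a context
+// `ctx`; pass an empty options struct when no lease is held):
+//
+//	resp, err := client.GetAccessPolicy(ctx, &GetAccessPolicyOptions{})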
+type GetAccessPolicyOptions struct {
+	LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPolicyOptions, *LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil
+	}
+
+	return nil, o.LeaseAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SetAccessPolicyOptions provides a set of configurations for the ContainerClient.SetAccessPolicy operation.
+type SetAccessPolicyOptions struct {
+	// Specifies whether data in the container may be accessed publicly and the level of access
+	Access *PublicAccessType
+	// AccessConditions identifies container-specific access conditions which you optionally set.
+	AccessConditions *AccessConditions
+}
+
+func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+	lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions)
+	return &generated.ContainerClientSetAccessPolicyOptions{
+		Access: o.Access,
+	}, lac, mac
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go
new file mode 100644
index 000000000..9d8672b13
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go
@@ -0,0 +1,38 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package container
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// CreateResponse contains the response from method Client.Create.
+type CreateResponse = generated.ContainerClientCreateResponse
+
+// DeleteResponse contains the response from method Client.Delete.
+type DeleteResponse = generated.ContainerClientDeleteResponse
+
+// RestoreResponse contains the response from method Client.Restore.
+type RestoreResponse = generated.ContainerClientRestoreResponse
+
+// GetPropertiesResponse contains the response from method Client.GetProperties.
+type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse
+
+// ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment.
+type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse
+
+// ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment.
+type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse
+
+// SetMetadataResponse contains the response from method Client.SetMetadata.
+type SetMetadataResponse = generated.ContainerClientSetMetadataResponse
+
+// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy.
+type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse
+
+// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy.
+type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go index c2426eb70..d5b6ed6a7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -1,9 +1,8 @@ //go:build go1.18 // +build go1.18 -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. /* @@ -167,13 +166,13 @@ Examples handle(err) // Download the blob's contents and ensure that the download worked properly - blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil) + blobDownloadResponse, err := blockBlobClient.DownloadStream(context.TODO(), nil) handle(err) // Use the bytes.Buffer object to read the downloaded data. // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. reader := blobDownloadResponse.Body(nil) - downloadData, err := ioutil.ReadAll(reader) + downloadData, err := io.ReadAll(reader) handle(err) if string(downloadData) != uploadData { handle(errors.New("Uploaded data should be same as downloaded data")) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go deleted file mode 100644 index 287250039..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go +++ /dev/null @@ -1,316 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "encoding/base64" - "io" - "net/http" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" - - "bytes" - "errors" - "os" -) - -// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. -func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) { - if o.BlockSize == 0 { - // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error - if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { - return nil, errors.New("buffer is too large to upload to a block blob") - } - // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if readerSize <= BlockBlobMaxUploadBlobBytes { - o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified - } else { - o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks - if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB - o.BlockSize = BlobDefaultDownloadBlockSize - } - // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). 
- } - } - - if readerSize <= BlockBlobMaxUploadBlobBytes { - // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) - if o.Progress != nil { - body = streaming.NewRequestProgress(internal.NopCloser(body), o.Progress) - } - - uploadBlockBlobOptions := o.getUploadBlockBlobOptions() - resp, err := bb.Upload(ctx, internal.NopCloser(body), uploadBlockBlobOptions) - - return resp.RawResponse, err - } - - var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) - - blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs - progress := int64(0) - progressLock := &sync.Mutex{} - - err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "uploadReaderAtToBlockBlob", - TransferSize: readerSize, - ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, - Operation: func(offset int64, count int64, ctx context.Context) error { - // This function is called once per block. - // It is passed this block's offset within the buffer and its count of bytes - // Prepare to read the proper block/section of the buffer - var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) - blockNum := offset / o.BlockSize - if o.Progress != nil { - blockProgress := int64(0) - body = streaming.NewRequestProgress(internal.NopCloser(body), - func(bytesTransferred int64) { - diff := bytesTransferred - blockProgress - blockProgress = bytesTransferred - progressLock.Lock() // 1 goroutine at a time gets progress report - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - - // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks - // at the same time causing PutBlockList to get a mix of blocks from all the clients. - generatedUuid, err := uuid.New() - if err != nil { - return err - } - blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) - stageBlockOptions := o.getStageBlockOptions() - _, err = bb.StageBlock(ctx, blockIDList[blockNum], internal.NopCloser(body), stageBlockOptions) - return err - }, - }) - if err != nil { - return nil, err - } - // All put blocks were successful, call Put Block List to finalize the blob - commitBlockListOptions := o.getCommitBlockListOptions() - resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) - - return resp.RawResponse, err -} - -// UploadBuffer uploads a buffer in blocks to a block blob. -func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) { - return bb.uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), o) -} - -// UploadFile uploads a file in blocks to a block blob. -func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) { - - stat, err := file.Stat() - if err != nil { - return nil, err - } - return bb.uploadReaderAtToBlockBlob(ctx, file, stat.Size(), o) -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. -// A Context deadline or cancellation will cause this to error. -func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { - if err := o.defaults(); err != nil { - return BlockBlobCommitBlockListResponse{}, err - } - - // If we used the default manager, we need to close it. 
- if o.transferMangerNotSet { - defer o.TransferManager.Close() - } - - result, err := copyFromReader(ctx, body, bb, o) - if err != nil { - return BlockBlobCommitBlockListResponse{}, err - } - - return result, nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DownloadToWriterAt downloads an Azure blob to a WriterAt with parallel. -// Offset and count are optional, pass 0 for both to download the entire blob. -func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error { - if o.BlockSize == 0 { - o.BlockSize = BlobDefaultDownloadBlockSize - } - - if count == CountToEnd { // If size not specified, calculate it - // If we don't have the length at all, get it - downloadBlobOptions := o.getDownloadBlobOptions(0, CountToEnd, nil) - dr, err := b.Download(ctx, downloadBlobOptions) - if err != nil { - return err - } - count = *dr.ContentLength - offset - } - - if count <= 0 { - // The file is empty, there is nothing to download. - return nil - } - - // Prepare and do parallel download. - progress := int64(0) - progressLock := &sync.Mutex{} - - err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "downloadBlobToWriterAt", - TransferSize: count, - ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, - Operation: func(chunkStart int64, count int64, ctx context.Context) error { - - downloadBlobOptions := o.getDownloadBlobOptions(chunkStart+offset, count, nil) - dr, err := b.Download(ctx, downloadBlobOptions) - if err != nil { - return err - } - body := dr.Body(&o.RetryReaderOptionsPerBlock) - if o.Progress != nil { - rangeProgress := int64(0) - body = streaming.NewResponseProgress( - body, - func(bytesTransferred int64) { - diff := bytesTransferred - rangeProgress - rangeProgress = bytesTransferred - progressLock.Lock() - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - _, err = io.Copy(newSectionWriter(writer, chunkStart, count), body) - if err != nil { - return err - } - err = body.Close() - return err - }, - }) - if err != nil { - return err - } - return nil -} - -// DownloadToBuffer downloads an Azure blob to a buffer with parallel. -// Offset and count are optional, pass 0 for both to download the entire blob. -func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error { - return b.DownloadToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o) -} - -// DownloadToFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -// Offset and count are optional, pass 0 for both to download the entire blob. -func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error { - // 1. Calculate the size of the destination file - var size int64 - - if count == CountToEnd { - // Try to get Azure blob's size - getBlobPropertiesOptions := o.getBlobPropertiesOptions() - props, err := b.GetProperties(ctx, getBlobPropertiesOptions) - if err != nil { - return err - } - size = *props.ContentLength - offset - } else { - size = count - } - - // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. 
- stat, err := file.Stat() - if err != nil { - return err - } - if stat.Size() != size { - if err = file.Truncate(size); err != nil { - return err - } - } - - if size > 0 { - return b.DownloadToWriterAt(ctx, offset, size, file, o) - } else { // if the blob's size is 0, there is no need in downloading it - return nil - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DoBatchTransfer helps to execute operations in a batch manner. -// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) -func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { - if o.ChunkSize == 0 { - return errors.New("ChunkSize cannot be 0") - } - - if o.Parallelism == 0 { - o.Parallelism = 5 // default Parallelism - } - - // Prepare and do parallel operations. - numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) - operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Create the goroutines that process each operation (in parallel). - for g := uint16(0); g < o.Parallelism; g++ { - //grIndex := g - go func() { - for f := range operationChannel { - err := f() - operationResponseChannel <- err - } - }() - } - - // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { - curChunkSize := o.ChunkSize - - if chunkNum == numChunks-1 { // Last chunk - curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total - } - offset := int64(chunkNum) * o.ChunkSize - - operationChannel <- func() error { - return o.Operation(offset, curChunkSize, ctx) - } - } - close(operationChannel) - - // Wait for the operations to complete. - var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { - responseError := <-operationResponseChannel - // record the first error (the original error which should cause the other chunks to fail with canceled context) - if responseError != nil && firstErr == nil { - cancel() // As soon as any operation fails, cancel all remaining operation calls - firstErr = responseError - } - } - return firstErr -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go new file mode 100644 index 000000000..16e6cac06 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -0,0 +1,89 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
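+//
+// The base package below wraps each generated client in a small generic
+// holder: Client[T] carries the generated client plus an optional shared key
+// credential, and CompositeClient[T, U] pairs the generic blob client with a
+// specialized one (append, block, or page). A hedged sketch of how a public
+// package might unwrap a wrapped service client `svc` (names as defined below):
+//
+//	inner := base.InnerClient(svc) // *generated.ServiceClient
+//	key := base.SharedKey(svc)     // *exported.SharedKeyCredential; may be nil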
+ +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +type Client[T any] struct { + inner *T + sharedKey *exported.SharedKeyCredential +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func NewClient[T any](inner *T) *Client[T] { + return &Client[T]{inner: inner} +} + +func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(containerURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] { + return &Client[generated.ContainerClient]{ + inner: generated.NewContainerClient(containerURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] { + return &Client[generated.BlobClient]{ + inner: generated.NewBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +type CompositeClient[T, U any] struct { + innerT *T + innerU *U + sharedKey *exported.SharedKeyCredential +} + +func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { + return &Client[T]{inner: client.innerT}, client.innerU +} + +func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { + return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewAppendBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { + return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewBlockBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { + return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewPageBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { + return client.sharedKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go new file mode 100644 index 000000000..96d188fa5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package exported + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = generated.ModifiedAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go similarity index 88% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go index 612bc784c..14c293cf6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package exported import ( "bytes" @@ -19,7 +19,7 @@ type AccessPolicyPermission struct { // String produces the access policy permission string for an Azure Storage container. // Call this method to set AccessPolicy's Permission field. -func (p AccessPolicyPermission) String() string { +func (p *AccessPolicyPermission) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go new file mode 100644 index 000000000..9bc1ca47d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. 
An HTTPRange
+// with a non-zero offset and a zero-value count indicates from the offset to the resource's end.
+type HTTPRange struct {
+	Offset int64
+	Count  int64
+}
+
+// FormatHTTPRange converts an HTTPRange to its string format.
+func FormatHTTPRange(r HTTPRange) *string {
+	if r.Offset == 0 && r.Count == 0 {
+		return nil // No specified range
+	}
+	endOffset := "" // if count == CountToEnd (0)
+	if r.Count > 0 {
+		endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10)
+	}
+	dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset)
+	return &dataRange
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go
similarity index 73%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go
index 60b1e5a76..d15631054 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go
@@ -2,9 +2,9 @@
 // +build go1.18
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
+// Licensed under the MIT License. See License.txt in the project root for license information.
 
-package azblob
+package exported
 
 import (
 	"bytes"
@@ -22,6 +22,7 @@ import (
 	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
 	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
 )
 
 // NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
@@ -35,7 +36,6 @@ func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCr
 }
 
 // SharedKeyCredential contains an account's name and its primary or secondary key.
-// It is immutable making it shareable and goroutine-safe.
 type SharedKeyCredential struct {
 	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
 	accountName string
@@ -58,7 +58,7 @@ func (c *SharedKeyCredential) SetAccountKey(accountKey string) error {
 }
 
 // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
-func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) { +func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) _, err := h.Write([]byte(message)) return base64.StdEncoding.EncodeToString(h.Sum(nil)), err @@ -67,7 +67,7 @@ func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services headers := req.Header - contentLength := headers.Get(headerContentLength) + contentLength := getHeader(shared.HeaderContentLength, headers) if contentLength == "0" { contentLength = "" } @@ -79,23 +79,36 @@ func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, erro stringToSign := strings.Join([]string{ req.Method, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), "", // Empty date because x-ms-date is expected (as per web page above) - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), c.buildCanonicalizedHeader(headers), canonicalizedResource, }, "\n") return stringToSign, nil } +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { cm := map[string][]string{} for k, v := range headers { @@ -165,33 +178,41 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er return cr.String(), nil } -type sharedKeyCredPolicy struct { +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. 
+func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { cred *SharedKeyCredential } -func newSharedKeyCredPolicy(cred *SharedKeyCredential) *sharedKeyCredPolicy { - return &sharedKeyCredPolicy{cred: cred} +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} } -func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { - if d := req.Raw().Header.Get(headerXmsDate); d == "" { - req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat)) +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) } stringToSign, err := s.cred.buildStringToSign(req.Raw()) if err != nil { return nil, err } - signature, err := s.cred.ComputeHMACSHA256(stringToSign) + signature, err := s.cred.computeHMACSHA256(stringToSign) if err != nil { return nil, err } authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") - req.Raw().Header.Set(headerAuthorization, authHeader) + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) response, err := req.Next() if err != nil && response != nil && response.StatusCode == http.StatusForbidden { // Service failed to authenticate request, log it - log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n") + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") } return response, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go new file mode 100644 index 000000000..2e2dd16e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go @@ -0,0 +1,64 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it +func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { + return &UserDelegationCredential{ + accountName: accountName, + userDelegationKey: udk, + } +} + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// UserDelegationCredential contains an account's name and its user delegation key. 
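+// A hedged construction sketch (assumes `udk` is a UserDelegationKey already
+// obtained from the service):
+//
+//	udc := NewUserDelegationCredential("<account-name>", udk)
+//	_ = udc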
+type UserDelegationCredential struct {
+	accountName       string
+	userDelegationKey UserDelegationKey
+}
+
+// getAccountName returns the Storage account's name.
+func (f *UserDelegationCredential) getAccountName() string {
+	return f.accountName
+}
+
+// GetAccountName is a helper method for accessing the account name outside this package.
+func GetAccountName(udc *UserDelegationCredential) string {
+	return udc.getAccountName()
+}
+
+// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
+func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) {
+	bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value)
+	h := hmac.New(sha256.New, bytes)
+	_, err := h.Write([]byte(message))
+	return base64.StdEncoding.EncodeToString(h.Sum(nil)), err
+}
+
+// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package.
+func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) {
+	return udc.computeHMACSHA256(message)
+}
+
+// getUDKParams returns the UserDelegationKey.
+func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey {
+	return &f.userDelegationKey
+}
+
+// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package.
+func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey {
+	return udc.getUDKParams()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
new file mode 100644
index 000000000..7b8ee601d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
@@ -0,0 +1,12 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package exported
+
+const (
+	ModuleName    = "azblob"
+	ModuleVersion = "v0.5.1"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go
new file mode 100644
index 000000000..3b6184fea
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go
@@ -0,0 +1,19 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *AppendBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *AppendBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md new file mode 100644 index 000000000..d4e2100e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -0,0 +1,304 @@ +# Code Generation - Azure Blob SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: . +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.43" +``` + +### Remove pager methods and export various generated methods in container client + +``` yaml +directive: + - from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`). + replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); +``` + +### Fix BlobMetadata. + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. 
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` + +# Export various createRequest/HandleResponse methods + +``` yaml +directive: +- from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). + replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); + +- from: zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). 
+ replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` }); +``` + +### Clean up some const type names so they don't stutter + +``` yaml +directive: +- from: swagger-document + where: $.parameters['BlobDeleteType'] + transform: > + $["x-ms-enum"].name = "DeleteType"; + $["x-ms-client-name"] = "DeleteType"; + +- from: swagger-document + where: $.parameters['BlobExpiryOptions'] + transform: > + $["x-ms-enum"].name = "ExpiryOptions"; + $["x-ms-client-name"].name = "ExpiryOptions"; + +- from: swagger-document + where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"] + transform: > + $["x-ms-client-name"].name = "ImmutabilityPolicyMode"; + $.enum = [ "Mutable", "Unlocked", "Locked"]; + $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false }; + +- from: swagger-document + where: $.parameters['ImmutabilityPolicyMode'] + transform: > + $["x-ms-enum"].name = "ImmutabilityPolicySetting"; + $["x-ms-client-name"].name = "ImmutabilityPolicySetting"; + +- from: swagger-document + where: $.definitions['BlobPropertiesInternal'] + transform: > + $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). + replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). + replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). + replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). + replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); +``` + +### Unsure why this casing changed, but fixing it + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/SignedOid\s+\*string/g, `SignedOID *string`). + replace(/SignedTid\s+\*string/g, `SignedTID *string`); +``` + +### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed + +``` yaml +directive: +- from: zz_constants.go + where: $ + transform: >- + return $. 
+      replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed/g, `StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed`).
+      replace(/"IncrementalCopyOfEralierVersionSnapshotNotAllowed"/g, `"IncrementalCopyOfEarlierVersionSnapshotNotAllowed"`);
+```
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
new file mode 100644
index 000000000..c3d3c2607
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
@@ -0,0 +1,17 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+func (client *BlobClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *BlobClient) Pipeline() runtime.Pipeline {
+	return client.pl
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
new file mode 100644
index 000000000..a43e327ec
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
@@ -0,0 +1,19 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package generated
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+func (client *BlockBlobClient) Endpoint() string {
+	return client.endpoint
+}
+
+func (client *BlockBlobClient) Pipeline() runtime.Pipeline {
+	return client.pl
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
new file mode 100644
index 000000000..bbbf828a0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
@@ -0,0 +1,17 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
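+//
+// These small hand-written files exist so that code outside the generated
+// package can read each client's otherwise-unexported endpoint and pipeline.
+// A hedged sketch of deriving a child URL from the accessors (JoinPaths is
+// from azcore/runtime; the variable names are illustrative):
+//
+//	blobURL := runtime.JoinPaths(containerClient.Endpoint(), "<blob-name>")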
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ContainerClient) Endpoint() string { + return client.endpoint +} + +func (client *ContainerClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go new file mode 100644 index 000000000..8a212cc3d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *PageBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *PageBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go new file mode 100644 index 000000000..1f449b955 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go new file mode 100644 index 000000000..d0fe18c5d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -0,0 +1,653 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// AppendBlobClient contains the methods for the AppendBlob group. +// Don't use this type directly, use NewAppendBlobClient() instead. +type AppendBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
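+//
+// A hypothetical construction sketch (module name, version and endpoint below
+// are placeholders, not values used by this package):
+//
+//	pl := runtime.NewPipeline("azblob", "v0.0.0", runtime.PipelineOptions{}, &policy.ClientOptions{})
+//	client := NewAppendBlobClient("https://<account>.blob.core.windows.net/<container>/<blob>", pl)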
+func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { + client := &AppendBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockHandleResponse(resp) +} + +// appendBlockCreateRequest creates the AppendBlock request. 
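+// (Caller-side sketch of the public AppendBlock method above, assuming ctx, a
+// byte slice data, and an io.ReadSeekCloser body over data already exist; all
+// six optional parameter groups may be nil:
+//
+//	resp, err := client.AppendBlock(ctx, int64(len(data)), body, nil, nil, nil, nil, nil, nil)
+// )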
+func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { + result := AppendBlobClientAppendBlockResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceURL - Specify a URL to the copy source. +// contentLength - The length of the request. +// options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockFromURLHandleResponse(resp) +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. +func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && 
cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
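+// (Caller-side sketch of AppendBlockFromURL, assuming ctx, a readable source
+// blob URL srcURL, and its length srcLen are already known; the seven optional
+// parameter groups may all be nil:
+//
+//	resp, err := client.AppendBlockFromURL(ctx, srcURL, srcLen, nil, nil, nil, nil, nil, nil, nil)
+// )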
+func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { + result := AppendBlobClientAppendBlockFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
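+//
+// A minimal call sketch (ctx and client are assumed to exist; a new append
+// blob is created empty, so contentLength is 0):
+//
+//	resp, err := client.Create(ctx, 0, nil, nil, nil, nil, nil, nil)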
+func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != 
nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { + result := AppendBlobClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + } + return client.sealHandleResponse(resp) +} + +// sealCreateRequest creates the Seal request. 
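+// (Caller-side sketch of the public Seal method above; all four optional
+// parameter groups may be nil:
+//
+//	resp, err := client.Seal(ctx, nil, nil, nil, nil)
+// )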
+func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { + result := AppendBlobClientSealResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go similarity index 52% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 607c6a714..713fb5279 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -5,12 +5,14 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "context" "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "net/http" @@ -19,16 +21,18 @@ import ( "time" ) -type blobClient struct { +// BlobClient contains the methods for the Blob group. +// Don't use this type directly, use NewBlobClient() instead. +type BlobClient struct { endpoint string pl runtime.Pipeline } -// newBlobClient creates a new instance of blobClient with the specified values. +// NewBlobClient creates a new instance of BlobClient with the specified values. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // pl - the pipeline used for sending requests and handling responses. -func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient { - client := &blobClient{ +func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { + client := &BlobClient{ endpoint: endpoint, pl: pl, } @@ -38,27 +42,27 @@ func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient { // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 // copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. -// blobClientAbortCopyFromURLOptions - blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) AbortCopyFromURL(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (blobClientAbortCopyFromURLResponse, error) { - req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, blobClientAbortCopyFromURLOptions, leaseAccessConditions) +// options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { - return blobClientAbortCopyFromURLResponse{}, err + return BlobClientAbortCopyFromURLResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientAbortCopyFromURLResponse{}, err + return BlobClientAbortCopyFromURLResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return blobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) } return client.abortCopyFromURLHandleResponse(resp) } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. 
-func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -66,25 +70,25 @@ func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop reqQP := req.Raw().URL.Query() reqQP.Set("comp", "copy") reqQP.Set("copyid", copyID) - if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAbortCopyFromURLOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-copy-action", "abort") + req.Raw().Header["x-ms-copy-action"] = []string{"abort"} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientAbortCopyFromURLOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. -func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (blobClientAbortCopyFromURLResponse, error) { - result := blobClientAbortCopyFromURLResponse{RawResponse: resp} +func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { + result := BlobClientAbortCopyFromURLResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -97,7 +101,7 @@ func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (b if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientAbortCopyFromURLResponse{}, err + return BlobClientAbortCopyFromURLResponse{}, err } result.Date = &date } @@ -106,76 +110,76 @@ func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (b // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// blobClientAcquireLeaseOptions - blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) AcquireLease(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, blobClientAcquireLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) AcquireLease(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return blobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) } return client.acquireLeaseHandleResponse(resp) } // acquireLeaseCreateRequest creates the AcquireLease request. -func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "acquire") - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Duration != nil { - req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Duration), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + if options != nil && options.Duration != nil { + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} } - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.ProposedLeaseID != nil { - req.Raw().Header.Set("x-ms-proposed-lease-id", *blobClientAcquireLeaseOptions.ProposedLeaseID) + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientAcquireLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // acquireLeaseHandleResponse handles the AcquireLease response. -func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobClientAcquireLeaseResponse, error) { - result := blobClientAcquireLeaseResponse{RawResponse: resp} +func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { + result := BlobClientAcquireLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } result.LastModified = &lastModified } @@ -194,7 +198,7 @@ func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobC if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientAcquireLeaseResponse{}, err + return BlobClientAcquireLeaseResponse{}, err } result.Date = &date } @@ -203,73 +207,73 @@ func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobC // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// blobClientBreakLeaseOptions - blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) BreakLease(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientBreakLeaseResponse, error) { - req, err := client.breakLeaseCreateRequest(ctx, blobClientBreakLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return blobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) } return client.breakLeaseHandleResponse(resp) } // breakLeaseCreateRequest creates the BreakLease request. -func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "break") - if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.BreakPeriod != nil { - req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.BreakPeriod), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - 
req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientBreakLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // breakLeaseHandleResponse handles the BreakLease response. -func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobClientBreakLeaseResponse, error) { - result := blobClientBreakLeaseResponse{RawResponse: resp} +func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { + result := BlobClientBreakLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } result.LastModified = &lastModified } @@ -277,7 +281,7 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCli leaseTime32, err := strconv.ParseInt(val, 10, 32) leaseTime := int32(leaseTime32) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } result.LeaseTime = &leaseTime } @@ -293,7 +297,7 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCli if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientBreakLeaseResponse{}, err + return BlobClientBreakLeaseResponse{}, err } result.Date = &date } @@ -302,76 +306,76 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCli // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. // proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID // string formats. -// blobClientChangeLeaseOptions - blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
@@ -302,76 +306,76 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobCl
 // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
 // If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
 // leaseID - Specifies the current lease ID on the resource.
 // proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
 // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
 // string formats.
-// blobClientChangeLeaseOptions - blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease
-// method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientChangeLeaseResponse, error) {
-	req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, blobClientChangeLeaseOptions, modifiedAccessConditions)
+// options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) {
+	req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions)
 	if err != nil {
-		return blobClientChangeLeaseResponse{}, err
+		return BlobClientChangeLeaseResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientChangeLeaseResponse{}, err
+		return BlobClientChangeLeaseResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return blobClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
+		return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.changeLeaseHandleResponse(resp)
 }
 
 // changeLeaseCreateRequest creates the ChangeLease request.
-func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
 	if err != nil {
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
 	reqQP.Set("comp", "lease")
-	if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientChangeLeaseOptions.Timeout), 10))
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-lease-action", "change")
-	req.Raw().Header.Set("x-ms-lease-id", leaseID)
-	req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID)
+	req.Raw().Header["x-ms-lease-action"] = []string{"change"}
+	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+	req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blobClientChangeLeaseOptions.RequestID)
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
 // changeLeaseHandleResponse handles the ChangeLease response.
-func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobClientChangeLeaseResponse, error) {
-	result := blobClientChangeLeaseResponse{RawResponse: resp}
+func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) {
+	result := BlobClientChangeLeaseResponse{}
 	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
+		result.ETag = (*azcore.ETag)(&val)
 	}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientChangeLeaseResponse{}, err
+			return BlobClientChangeLeaseResponse{}, err
 		}
 		result.LastModified = &lastModified
 	}
@@ -390,7 +394,7 @@ func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobCl
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientChangeLeaseResponse{}, err
+			return BlobClientChangeLeaseResponse{}, err
 		}
 		result.Date = &date
 	}
@@ -400,117 +404,117 @@ func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobCl
 
 // CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response
 // until the copy is complete.
 // If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
 // copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
 // a page blob snapshot. The value should be URL-encoded as it would appear in a request
 // URI. The source blob must either be public or must be authenticated via a shared access signature.
-// blobClientCopyFromURLOptions - blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL
+// options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
+// method.
-// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
-// method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-func (client *blobClient) CopyFromURL(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCopyFromURLResponse, error) {
-	req, err := client.copyFromURLCreateRequest(ctx, copySource, blobClientCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) {
+	req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
 	if err != nil {
-		return blobClientCopyFromURLResponse{}, err
+		return BlobClientCopyFromURLResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientCopyFromURLResponse{}, err
+		return BlobClientCopyFromURLResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return blobClientCopyFromURLResponse{}, runtime.NewResponseError(resp)
+		return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.copyFromURLHandleResponse(resp)
 }
 
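The other recurring rename is that the verbose per-operation option parameters (blobClientCopyFromURLOptions and friends) become a uniform `options` parameter whose optional fields are all pointers, so both a nil struct and a nil field mean "use the service default". A small self-contained sketch of that idiom; the type and helper below are hypothetical stand-ins, not SDK types:

```go
package main

import (
	"fmt"
	"strconv"
)

// copyOptions is a hypothetical stand-in for a generated options struct:
// every optional field is a pointer so "unset" is distinguishable from zero.
type copyOptions struct {
	Timeout   *int32
	RequestID *string
}

// queryFor mirrors the generated nil-guard idiom: check the struct pointer
// and the field pointer before dereferencing.
func queryFor(options *copyOptions) string {
	if options != nil && options.Timeout != nil {
		return "timeout=" + strconv.FormatInt(int64(*options.Timeout), 10)
	}
	return ""
}

func ptr[T any](v T) *T { return &v } // generic address-of helper (Go 1.18+)

func main() {
	fmt.Printf("%q\n", queryFor(nil))                                   // "" - all defaults
	fmt.Printf("%q\n", queryFor(&copyOptions{Timeout: ptr[int32](30)})) // "timeout=30"
}
```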
 // copyFromURLCreateRequest creates the CopyFromURL request.
-func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
 	if err != nil {
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCopyFromURLOptions.Timeout), 10))
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-requires-sync", "true")
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Metadata != nil {
-		for k, v := range blobClientCopyFromURLOptions.Metadata {
-			req.Raw().Header.Set("x-ms-meta-"+k, v)
+	req.Raw().Header["x-ms-requires-sync"] = []string{"true"}
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			req.Raw().Header["x-ms-meta-"+k] = []string{v}
 		}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Tier != nil {
-		req.Raw().Header.Set("x-ms-access-tier", string(*blobClientCopyFromURLOptions.Tier))
+	if options != nil && options.Tier != nil {
+		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
 	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
-		req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
+		req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
 	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
-		req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
+		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
 	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
-		req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
+		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
 	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
-		req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
+		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
-	req.Raw().Header.Set("x-ms-copy-source", copySource)
+	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
 	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blobClientCopyFromURLOptions.RequestID)
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.SourceContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blobClientCopyFromURLOptions.SourceContentMD5))
+	if options != nil && options.SourceContentMD5 != nil {
+		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.BlobTagsString != nil {
-		req.Raw().Header.Set("x-ms-tags", *blobClientCopyFromURLOptions.BlobTagsString)
+	if options != nil && options.BlobTagsString != nil {
+		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry != nil {
-		req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+	if options != nil && options.ImmutabilityPolicyExpiry != nil {
+		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyMode != nil {
-		req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientCopyFromURLOptions.ImmutabilityPolicyMode))
+	if options != nil && options.ImmutabilityPolicyMode != nil {
+		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.LegalHold != nil {
-		req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientCopyFromURLOptions.LegalHold))
+	if options != nil && options.LegalHold != nil {
+		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
 	}
-	if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.CopySourceAuthorization != nil {
-		req.Raw().Header.Set("x-ms-copy-source-authorization", *blobClientCopyFromURLOptions.CopySourceAuthorization)
+	if options != nil && options.CopySourceAuthorization != nil {
+		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
 	}
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
 // copyFromURLHandleResponse handles the CopyFromURL response.
-func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobClientCopyFromURLResponse, error) {
-	result := blobClientCopyFromURLResponse{RawResponse: resp}
+func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) {
+	result := BlobClientCopyFromURLResponse{}
 	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
+		result.ETag = (*azcore.ETag)(&val)
 	}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientCopyFromURLResponse{}, err
+			return BlobClientCopyFromURLResponse{}, err
 		}
 		result.LastModified = &lastModified
 	}
@@ -529,7 +533,7 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobCl
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientCopyFromURLResponse{}, err
+			return BlobClientCopyFromURLResponse{}, err
 		}
 		result.Date = &date
 	}
@@ -542,14 +546,14 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobCl
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
-			return blobClientCopyFromURLResponse{}, err
+			return BlobClientCopyFromURLResponse{}, err
 		}
 		result.ContentMD5 = contentMD5
 	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
-			return blobClientCopyFromURLResponse{}, err
+			return BlobClientCopyFromURLResponse{}, err
 		}
 		result.XMSContentCRC64 = xMSContentCRC64
 	}
@@ -558,95 +562,95 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobCl
 
 // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob
 // If the operation fails it returns an *azcore.ResponseError type.
-// blobClientCreateSnapshotOptions - blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot
-// method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-func (client *blobClient) CreateSnapshot(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCreateSnapshotResponse, error) {
-	req, err := client.createSnapshotCreateRequest(ctx, blobClientCreateSnapshotOptions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions)
+// Generated from API version 2020-10-02
+// options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) {
+	req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions)
 	if err != nil {
-		return blobClientCreateSnapshotResponse{}, err
+		return BlobClientCreateSnapshotResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientCreateSnapshotResponse{}, err
+		return BlobClientCreateSnapshotResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return blobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp)
+		return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.createSnapshotHandleResponse(resp)
 }
 
 // createSnapshotCreateRequest creates the CreateSnapshot request.
-func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
 	if err != nil {
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
 	reqQP.Set("comp", "snapshot")
-	if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCreateSnapshotOptions.Timeout), 10))
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Metadata != nil {
-		for k, v := range blobClientCreateSnapshotOptions.Metadata {
-			req.Raw().Header.Set("x-ms-meta-"+k, v)
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			req.Raw().Header["x-ms-meta-"+k] = []string{v}
 		}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
+		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
+		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
 	}
 	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
+		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
 	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blobClientCreateSnapshotOptions.RequestID)
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
 // createSnapshotHandleResponse handles the CreateSnapshot response.
-func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blobClientCreateSnapshotResponse, error) {
-	result := blobClientCreateSnapshotResponse{RawResponse: resp}
+func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) {
+	result := BlobClientCreateSnapshotResponse{}
 	if val := resp.Header.Get("x-ms-snapshot"); val != "" {
 		result.Snapshot = &val
 	}
 	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
+		result.ETag = (*azcore.ETag)(&val)
 	}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientCreateSnapshotResponse{}, err
+			return BlobClientCreateSnapshotResponse{}, err
 		}
 		result.LastModified = &lastModified
 	}
@@ -665,14 +669,14 @@ func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blo
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientCreateSnapshotResponse{}, err
+			return BlobClientCreateSnapshotResponse{}, err
 		}
 		result.Date = &date
 	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
-			return blobClientCreateSnapshotResponse{}, err
+			return BlobClientCreateSnapshotResponse{}, err
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
@@ -691,76 +695,77 @@ func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blo
 // All other operations on a soft-deleted blob or snapshot causes the service to
 // return an HTTP status code of 404 (ResourceNotFound).
 // If the operation fails it returns an *azcore.ResponseError type.
-// blobClientDeleteOptions - blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blobClient) Delete(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDeleteResponse, error) {
-	req, err := client.deleteCreateRequest(ctx, blobClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions)
+// Generated from API version 2020-10-02
+// options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) {
+	req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
-		return blobClientDeleteResponse{}, err
+		return BlobClientDeleteResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientDeleteResponse{}, err
+		return BlobClientDeleteResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return blobClientDeleteResponse{}, runtime.NewResponseError(resp)
+		return BlobClientDeleteResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.deleteHandleResponse(resp)
 }
 
 // deleteCreateRequest creates the Delete request.
-func (client *blobClient) deleteCreateRequest(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
 	if err != nil {
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	if blobClientDeleteOptions != nil && blobClientDeleteOptions.Snapshot != nil {
-		reqQP.Set("snapshot", *blobClientDeleteOptions.Snapshot)
+	if options != nil && options.Snapshot != nil {
+		reqQP.Set("snapshot", *options.Snapshot)
 	}
-	if blobClientDeleteOptions != nil && blobClientDeleteOptions.VersionID != nil {
-		reqQP.Set("versionid", *blobClientDeleteOptions.VersionID)
+	if options != nil && options.VersionID != nil {
+		reqQP.Set("versionid", *options.VersionID)
 	}
-	if blobClientDeleteOptions != nil && blobClientDeleteOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDeleteOptions.Timeout), 10))
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
-	if blobClientDeleteOptions != nil && blobClientDeleteOptions.BlobDeleteType != nil {
-		reqQP.Set("deletetype", string(*blobClientDeleteOptions.BlobDeleteType))
+	if options != nil && options.DeleteType != nil {
+		reqQP.Set("deletetype", string(*options.DeleteType))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	if blobClientDeleteOptions != nil && blobClientDeleteOptions.DeleteSnapshots != nil {
-		req.Raw().Header.Set("x-ms-delete-snapshots", string(*blobClientDeleteOptions.DeleteSnapshots))
+	if options != nil && options.DeleteSnapshots != nil {
+		req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blobClientDeleteOptions != nil && blobClientDeleteOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blobClientDeleteOptions.RequestID)
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
 // deleteHandleResponse handles the Delete response.
-func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientDeleteResponse, error) {
-	result := blobClientDeleteResponse{RawResponse: resp}
+func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) {
+	result := BlobClientDeleteResponse{}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
@@ -773,7 +778,7 @@ func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientD
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDeleteResponse{}, err
+			return BlobClientDeleteResponse{}, err
 		}
 		result.Date = &date
 	}
@@ -782,25 +787,26 @@ func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientD
 
 // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob
 // If the operation fails it returns an *azcore.ResponseError type.
-// options - blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy
+// Generated from API version 2020-10-02
+// options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
 // method.
-func (client *blobClient) DeleteImmutabilityPolicy(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (blobClientDeleteImmutabilityPolicyResponse, error) {
+func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) {
 	req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options)
 	if err != nil {
-		return blobClientDeleteImmutabilityPolicyResponse{}, err
+		return BlobClientDeleteImmutabilityPolicyResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientDeleteImmutabilityPolicyResponse{}, err
+		return BlobClientDeleteImmutabilityPolicyResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return blobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
+		return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.deleteImmutabilityPolicyHandleResponse(resp)
 }
 
 // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request.
-func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) {
+func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
 	if err != nil {
 		return nil, err
@@ -811,17 +817,17 @@ func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
 	if options != nil && options.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
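Another pattern visible in every handler: ETag response headers are now stored as *azcore.ETag instead of *string, via the one-line conversion result.ETag = (*azcore.ETag)(&val). That compiles only because azcore.ETag is a defined string type, so a *string converts directly to a pointer of the named type. A sketch with a local stand-in type (etag here is hypothetical; the real type lives in the azcore package):

```go
package main

import "fmt"

// etag is a local stand-in for azcore.ETag, whose underlying type is string.
type etag string

func main() {
	val := `"0x8DEADBEEF"` // as read from resp.Header.Get("ETag")

	// Pointer conversion between types with the same underlying type is
	// legal Go — exactly what the generated handlers rely on.
	tag := (*etag)(&val)

	fmt.Println(*tag == etag(val)) // true
}
```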
 // deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response.
-func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientDeleteImmutabilityPolicyResponse, error) {
-	result := blobClientDeleteImmutabilityPolicyResponse{RawResponse: resp}
+func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) {
+	result := BlobClientDeleteImmutabilityPolicyResponse{}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
@@ -834,7 +840,7 @@ func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDeleteImmutabilityPolicyResponse{}, err
+			return BlobClientDeleteImmutabilityPolicyResponse{}, err
 		}
 		result.Date = &date
 	}
@@ -844,94 +850,95 @@ func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
 
 // Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You
 // can also call Download to read a snapshot.
 // If the operation fails it returns an *azcore.ResponseError type.
-// blobClientDownloadOptions - blobClientDownloadOptions contains the optional parameters for the blobClient.Download method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blobClient) Download(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDownloadResponse, error) {
-	req, err := client.downloadCreateRequest(ctx, blobClientDownloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+// Generated from API version 2020-10-02
+// options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) {
+	req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
 	if err != nil {
-		return blobClientDownloadResponse{}, err
+		return BlobClientDownloadResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientDownloadResponse{}, err
+		return BlobClientDownloadResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) {
-		return blobClientDownloadResponse{}, runtime.NewResponseError(resp)
+		return BlobClientDownloadResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.downloadHandleResponse(resp)
 }
 
 // downloadCreateRequest creates the Download request.
-func (client *blobClient) downloadCreateRequest(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
 	if err != nil {
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.Snapshot != nil {
-		reqQP.Set("snapshot", *blobClientDownloadOptions.Snapshot)
+	if options != nil && options.Snapshot != nil {
+		reqQP.Set("snapshot", *options.Snapshot)
 	}
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.VersionID != nil {
-		reqQP.Set("versionid", *blobClientDownloadOptions.VersionID)
+	if options != nil && options.VersionID != nil {
+		reqQP.Set("versionid", *options.VersionID)
 	}
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDownloadOptions.Timeout), 10))
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
 	runtime.SkipBodyDownload(req)
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.Range != nil {
-		req.Raw().Header.Set("x-ms-range", *blobClientDownloadOptions.Range)
+	if options != nil && options.Range != nil {
+		req.Raw().Header["x-ms-range"] = []string{*options.Range}
 	}
 	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentMD5))
+	if options != nil && options.RangeGetContentMD5 != nil {
+		req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)}
 	}
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentCRC64 != nil {
-		req.Raw().Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentCRC64))
+	if options != nil && options.RangeGetContentCRC64 != nil {
+		req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
+		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
+		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blobClientDownloadOptions != nil && blobClientDownloadOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blobClientDownloadOptions.RequestID)
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
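Note the seed value in the handler below: BlobClientDownloadResponse{Body: resp.Body} replaces the removed RawResponse field. Because downloadCreateRequest calls runtime.SkipBodyDownload above, the pipeline leaves the payload unread; the handler copies the stream into the typed response and parses only headers, leaving the caller to drain and close Body. A minimal sketch of that shape, with hypothetical stand-in types rather than the SDK's:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
)

// downloadResult is a hypothetical stand-in for the generated response:
// the payload stays a live stream; metadata comes from headers.
type downloadResult struct {
	Body          io.ReadCloser
	ContentLength *int64
}

func handle(resp *http.Response) (downloadResult, error) {
	result := downloadResult{Body: resp.Body} // stream handed through unread
	if val := resp.Header.Get("Content-Length"); val != "" {
		n, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return downloadResult{}, err
		}
		result.ContentLength = &n
	}
	return result, nil
}

func main() {
	resp := &http.Response{
		Header: http.Header{"Content-Length": []string{"11"}},
		Body:   io.NopCloser(strings.NewReader("hello world")),
	}
	res, err := handle(resp)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	data, _ := io.ReadAll(res.Body)
	fmt.Println(*res.ContentLength, string(data)) // 11 hello world
}
```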
 // downloadHandleResponse handles the Download response.
-func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClientDownloadResponse, error) {
-	result := blobClientDownloadResponse{RawResponse: resp}
+func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) {
+	result := BlobClientDownloadResponse{Body: resp.Body}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.LastModified = &lastModified
 	}
@@ -957,7 +964,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 	if val := resp.Header.Get("Content-Length"); val != "" {
 		contentLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.ContentLength = &contentLength
 	}
@@ -968,12 +975,12 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 		result.ContentRange = &val
 	}
 	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
+		result.ETag = (*azcore.ETag)(&val)
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.ContentMD5 = contentMD5
 	}
@@ -992,7 +999,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.BlobSequenceNumber = &blobSequenceNumber
 	}
@@ -1002,7 +1009,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
 		copyCompletionTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.CopyCompletionTime = &copyCompletionTime
 	}
@@ -1045,7 +1052,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 	if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
 		isCurrentVersion, err := strconv.ParseBool(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.IsCurrentVersion = &isCurrentVersion
 	}
@@ -1055,7 +1062,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.Date = &date
 	}
@@ -1063,14 +1070,14 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
 		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.BlobCommittedBlockCount = &blobCommittedBlockCount
 	}
 	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
@@ -1083,52 +1090,52 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 	if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
 		blobContentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.BlobContentMD5 = blobContentMD5
 	}
 	if val := resp.Header.Get("x-ms-tag-count"); val != "" {
 		tagCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.TagCount = &tagCount
 	}
 	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
 		isSealed, err := strconv.ParseBool(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.IsSealed = &isSealed
 	}
 	if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
 		lastAccessed, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.LastAccessed = &lastAccessed
 	}
 	if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
 		immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
 	}
 	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
-		result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val)
+		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
 	}
 	if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
 		legalHold, err := strconv.ParseBool(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.LegalHold = &legalHold
 	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		contentCRC64, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
-			return blobClientDownloadResponse{}, err
+			return BlobClientDownloadResponse{}, err
 		}
 		result.ContentCRC64 = contentCRC64
 	}
@@ -1140,24 +1147,25 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClien
 
 // GetAccountInfo - Returns the sku name and account kind
 // If the operation fails it returns an *azcore.ResponseError type.
-// options - blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method.
-func (client *blobClient) GetAccountInfo(ctx context.Context, options *blobClientGetAccountInfoOptions) (blobClientGetAccountInfoResponse, error) {
+// Generated from API version 2020-10-02
+// options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
+func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) {
 	req, err := client.getAccountInfoCreateRequest(ctx, options)
 	if err != nil {
-		return blobClientGetAccountInfoResponse{}, err
+		return BlobClientGetAccountInfoResponse{}, err
 	}
 	resp, err := client.pl.Do(req)
 	if err != nil {
-		return blobClientGetAccountInfoResponse{}, err
+		return BlobClientGetAccountInfoResponse{}, err
 	}
 	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return blobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+		return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
 	}
 	return client.getAccountInfoHandleResponse(resp)
 }
 
 // getAccountInfoCreateRequest creates the GetAccountInfo request.
-func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, options *blobClientGetAccountInfoOptions) (*policy.Request, error) {
+func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) {
 	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
 	if err != nil {
 		return nil, err
@@ -1166,14 +1174,14 @@ func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, optio
 	reqQP.Set("restype", "account")
 	reqQP.Set("comp", "properties")
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	req.Raw().Header.Set("Accept", "application/xml")
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	return req, nil
 }
 
 // getAccountInfoHandleResponse handles the GetAccountInfo response.
-func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blobClientGetAccountInfoResponse, error) {
-	result := blobClientGetAccountInfoResponse{RawResponse: resp}
+func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) {
+	result := BlobClientGetAccountInfoResponse{}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
@@ -1186,7 +1194,7 @@ func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blo
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
-			return blobClientGetAccountInfoResponse{}, err
+			return BlobClientGetAccountInfoResponse{}, err
 		}
 		result.Date = &date
 	}
@@ -1202,92 +1210,92 @@ func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blo
 
 // GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties
 // for the blob. It does not return the content of the blob.
 // If the operation fails it returns an *azcore.ResponseError type.
-// blobClientGetPropertiesOptions - blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties
-// method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blobClient) GetProperties(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientGetPropertiesResponse, error) {
-	req, err := client.getPropertiesCreateRequest(ctx, blobClientGetPropertiesOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+// Generated from API version 2020-10-02
+// options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.getPropertiesHandleResponse(resp) } // getPropertiesCreateRequest creates the GetProperties request. -func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientGetPropertiesOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientGetPropertiesOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetPropertiesOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", 
modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetPropertiesOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. 
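A note on the header change that repeats through every hunk in this file: net/http's Header.Set canonicalizes its key (textproto canonical form, so "x-ms-version" is stored as "X-Ms-Version"), whereas assigning into the Header map directly keeps the key exactly as written. Whether case preservation is the actual motivation for the regenerated code is not stated in this patch; the stdlib-only sketch below just demonstrates the difference.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	// Header.Set canonicalizes the key: stored as "X-Ms-Version".
	h.Set("x-ms-version", "2020-10-02")
	// Direct map assignment, as in the regenerated code, stores the key verbatim.
	h["x-ms-lease-id"] = []string{"example-lease-id"}
	for k, v := range h {
		fmt.Println(k, v) // prints X-Ms-Version [...] and x-ms-lease-id [...]
	}
}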
-func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blobClientGetPropertiesResponse, error) { - result := blobClientGetPropertiesResponse{RawResponse: resp} +func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { + result := BlobClientGetPropertiesResponse{} if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-creation-time"); val != "" { creationTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.CreationTime = &creationTime } @@ -1316,7 +1324,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.CopyCompletionTime = &copyCompletionTime } @@ -1338,7 +1346,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsIncrementalCopy = &isIncrementalCopy } @@ -1357,7 +1365,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ContentLength = &contentLength } @@ -1365,12 +1373,12 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob result.ContentType = &val } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ContentMD5 = contentMD5 } @@ -1389,7 +1397,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -1405,7 +1413,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.Date = &date } @@ -1416,14 +1424,14 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { - return blobClientGetPropertiesResponse{}, err + return
BlobClientGetPropertiesResponse{}, err } result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -1439,7 +1447,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { accessTierInferred, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.AccessTierInferred = &accessTierInferred } @@ -1449,7 +1457,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { accessTierChangeTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.AccessTierChangeTime = &accessTierChangeTime } @@ -1459,28 +1467,28 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-tag-count"); val != "" { tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.TagCount = &tagCount } if val := resp.Header.Get("x-ms-expiry-time"); val != "" { expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ExpiresOn = &expiresOn } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.IsSealed = &isSealed } @@ -1490,24 +1498,24 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.LastAccessed = &lastAccessed } if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) if err != nil { - return blobClientGetPropertiesResponse{}, err + return BlobClientGetPropertiesResponse{}, err } result.LegalHold = &legalHold } @@ -1516,59 +1524,60 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blob // GetTags 
- The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. -// blobClientGetTagsOptions - blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) GetTags(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientGetTagsResponse, error) { - req, err := client.getTagsCreateRequest(ctx, blobClientGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientGetTagsResponse{}, runtime.NewResponseError(resp) + return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) } return client.getTagsHandleResponse(resp) } // getTagsCreateRequest creates the GetTags request. 
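Every *CreateRequest function in this file, including getTagsCreateRequest just below, guards optional values twice — once on the options struct pointer, once on the field pointer — before touching the query string or headers. A minimal sketch of that pattern, with an invented helper name; only url.Values and strconv from the standard library are assumed:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// applyTimeout is hypothetical; it mirrors the double nil-guard used
// by the generated create-request functions.
func applyTimeout(q url.Values, timeout *int32) {
	if timeout != nil {
		q.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
}

func main() {
	q := url.Values{}
	t := int32(30)
	applyTimeout(q, &t)     // parameter present: written to the query string
	applyTimeout(q, nil)    // parameter absent: silently skipped
	fmt.Println(q.Encode()) // timeout=30
}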
-func (client *blobClient) getTagsCreateRequest(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetTagsOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientGetTagsOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientGetTagsOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetTagsOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getTagsHandleResponse handles the GetTags response. -func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClientGetTagsResponse, error) { - result := blobClientGetTagsResponse{RawResponse: resp} +func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) { + result := BlobClientGetTagsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1581,98 +1590,99 @@ func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClient if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { - return blobClientGetTagsResponse{}, err + return BlobClientGetTagsResponse{}, err } return result, nil } // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. 
// If the operation fails it returns an *azcore.ResponseError type. -// blobClientQueryOptions - blobClientQueryOptions contains the optional parameters for the blobClient.Query method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) Query(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientQueryResponse, error) { - req, err := client.queryCreateRequest(ctx, blobClientQueryOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return blobClientQueryResponse{}, runtime.NewResponseError(resp) + return BlobClientQueryResponse{}, runtime.NewResponseError(resp) } return client.queryHandleResponse(resp) } // queryCreateRequest creates the Query request. 
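Query is the one operation in this section whose response body stays open for the caller: the request side calls runtime.SkipBodyDownload so the pipeline does not buffer the payload, and the new handler stores resp.Body on the response (Body: resp.Body) rather than the removed RawResponse field. A stdlib-only sketch of that hand-the-stream-to-the-caller shape, with an invented payload:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Stand-in for a response whose body was deliberately not buffered.
	resp := &http.Response{Body: io.NopCloser(strings.NewReader("a,b,c\n"))}
	defer resp.Body.Close() // the caller now owns the stream
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}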
-func (client *blobClient) queryCreateRequest(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "query") - if blobClientQueryOptions != nil && blobClientQueryOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientQueryOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientQueryOptions != nil && blobClientQueryOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientQueryOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - 
req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientQueryOptions != nil && blobClientQueryOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientQueryOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") - if blobClientQueryOptions != nil && blobClientQueryOptions.QueryRequest != nil { - return req, runtime.MarshalAsXML(req, *blobClientQueryOptions.QueryRequest) + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.QueryRequest != nil { + return req, runtime.MarshalAsXML(req, *options.QueryRequest) } return req, nil } // queryHandleResponse handles the Query response. -func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQueryResponse, error) { - result := blobClientQueryResponse{RawResponse: resp} +func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { + result := BlobClientQueryResponse{Body: resp.Body} if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.LastModified = &lastModified } @@ -1687,7 +1697,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.ContentLength = &contentLength } @@ -1698,12 +1708,12 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu result.ContentRange = &val } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.ContentMD5 = contentMD5 } @@ -1722,7 +1732,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -1732,7 +1742,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.CopyCompletionTime = ©CompletionTime } @@ -1775,7 +1785,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.Date = &date } @@ -1783,14 +1793,14 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil 
{ - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -1803,14 +1813,14 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { blobContentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.BlobContentMD5 = blobContentMD5 } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { - return blobClientQueryResponse{}, err + return BlobClientQueryResponse{}, err } result.ContentCRC64 = contentCRC64 } @@ -1819,72 +1829,72 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// blobClientReleaseLeaseOptions - blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) ReleaseLease(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientReleaseLeaseResponse, error) { - req, err := client.releaseLeaseCreateRequest(ctx, leaseID, blobClientReleaseLeaseOptions, modifiedAccessConditions) +// options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) } return client.releaseLeaseHandleResponse(resp) } // releaseLeaseCreateRequest creates the ReleaseLease request. 
-func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientReleaseLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "release") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientReleaseLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // releaseLeaseHandleResponse handles the ReleaseLease response. 
-func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobClientReleaseLeaseResponse, error) { - result := blobClientReleaseLeaseResponse{RawResponse: resp} +func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { + result := BlobClientReleaseLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } result.LastModified = &lastModified } @@ -1900,7 +1910,7 @@ func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobC if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientReleaseLeaseResponse{}, err + return BlobClientReleaseLeaseResponse{}, err } result.Date = &date } @@ -1909,72 +1919,72 @@ func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobC // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// blobClientRenewLeaseOptions - blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) RenewLease(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientRenewLeaseResponse, error) { - req, err := client.renewLeaseCreateRequest(ctx, leaseID, blobClientRenewLeaseOptions, modifiedAccessConditions) +// options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) } return client.renewLeaseHandleResponse(resp) } // renewLeaseCreateRequest creates the RenewLease request. 
-func (client *blobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") - if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientRenewLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "renew") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientRenewLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // renewLeaseHandleResponse handles the RenewLease response. 
-func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobClientRenewLeaseResponse, error) { - result := blobClientRenewLeaseResponse{RawResponse: resp} +func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { + result := BlobClientRenewLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } result.LastModified = &lastModified } @@ -1993,7 +2003,7 @@ func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobCli if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientRenewLeaseResponse{}, err + return BlobClientRenewLeaseResponse{}, err } result.Date = &date } @@ -2002,25 +2012,26 @@ func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobCli // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // expiryOptions - Required. Indicates mode of the expiry time -// options - blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. -func (client *blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (blobClientSetExpiryResponse, error) { +// options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) } return client.setExpiryHandleResponse(resp) } // setExpiryCreateRequest creates the SetExpiry request. 
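The ETag handling above changes shape twice: response handlers now store (*azcore.ETag)(&val) rather than &val, and request builders wrap If-Match/If-None-Match values in string(...), which suggests those model fields moved from *string to the azcore.ETag type. The pointer conversion is legal because azcore declares ETag as a string type, and Go allows converting a *string to a pointer of any type whose underlying type is string. A self-contained sketch with a local stand-in type:

package main

import "fmt"

// ETag stands in here for azcore.ETag, which is declared as a string type.
type ETag string

func main() {
	val := `"0x8D9..."`   // as returned by resp.Header.Get("ETag")
	etag := (*ETag)(&val) // *string -> *ETag: identical underlying types
	fmt.Println(*etag)
	fmt.Println(string(*etag)) // converting back when writing a request header
}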
-func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (*policy.Request, error) { +func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2031,28 +2042,28 @@ func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("x-ms-expiry-option", string(expiryOptions)) + req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} if options != nil && options.ExpiresOn != nil { - req.Raw().Header.Set("x-ms-expiry-time", *options.ExpiresOn) + req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setExpiryHandleResponse handles the SetExpiry response. -func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClientSetExpiryResponse, error) { - result := blobClientSetExpiryResponse{RawResponse: resp} +func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { + result := BlobClientSetExpiryResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } result.LastModified = &lastModified } @@ -2068,7 +2079,7 @@ func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClie if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetExpiryResponse{}, err + return BlobClientSetExpiryResponse{}, err } result.Date = &date } @@ -2077,99 +2088,99 @@ func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClie // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetHTTPHeadersOptions - blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) SetHTTPHeaders(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetHTTPHeadersResponse, error) { - req, err := client.setHTTPHeadersCreateRequest(ctx, blobClientSetHTTPHeadersOptions, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) } return client.setHTTPHeadersHandleResponse(resp) } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
-func (client *blobClient) setHTTPHeadersCreateRequest(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "properties") - if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetHTTPHeadersOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions 
!= nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetHTTPHeadersOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. -func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blobClientSetHTTPHeadersResponse, error) { - result := blobClientSetHTTPHeadersResponse{RawResponse: resp} +func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { + result := BlobClientSetHTTPHeadersResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } result.BlobSequenceNumber = &blobSequenceNumber } @@ -2185,7 +2196,7 @@ func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blo if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetHTTPHeadersResponse{}, err + return BlobClientSetHTTPHeadersResponse{}, err } result.Date = &date } @@ -2194,56 +2205,57 @@ func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blo // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetImmutabilityPolicyOptions - blobClientSetImmutabilityPolicyOptions contains the optional parameters for the -// blobClient.SetImmutabilityPolicy method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *blobClient) SetImmutabilityPolicy(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetImmutabilityPolicyResponse, error) { - req, err := client.setImmutabilityPolicyCreateRequest(ctx, blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) } return client.setImmutabilityPolicyHandleResponse(resp) } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. -func (client *blobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "immutabilityPolicies") - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetImmutabilityPolicyOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetImmutabilityPolicyOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry != nil { 
- req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} } - if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode)) + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. -func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientSetImmutabilityPolicyResponse, error) { - result := blobClientSetImmutabilityPolicyResponse{RawResponse: resp} +func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) { + result := BlobClientSetImmutabilityPolicyResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2256,44 +2268,45 @@ func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetImmutabilityPolicyResponse{}, err + return BlobClientSetImmutabilityPolicyResponse{}, err } result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry } if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } return result, nil } // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // legalHold - Specified if a legal hold should be set on the blob. -// options - blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. -func (client *blobClient) SetLegalHold(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (blobClientSetLegalHoldResponse, error) { +// options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
+func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) } return client.setLegalHoldHandleResponse(resp) } // setLegalHoldCreateRequest creates the SetLegalHold request. -func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (*policy.Request, error) { +func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2304,18 +2317,18 @@ func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setLegalHoldHandleResponse handles the SetLegalHold response. -func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobClientSetLegalHoldResponse, error) { - result := blobClientSetLegalHoldResponse{RawResponse: resp} +func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) { + result := BlobClientSetLegalHoldResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2328,14 +2341,14 @@ func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobC if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) if err != nil { - return blobClientSetLegalHoldResponse{}, err + return BlobClientSetLegalHoldResponse{}, err } result.LegalHold = &legalHold } @@ -2345,92 +2358,92 @@ func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobC // SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value // pairs // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetMetadataOptions - blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) SetMetadata(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetMetadataResponse, error) { - req, err := client.setMetadataCreateRequest(ctx, blobClientSetMetadataOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) } return client.setMetadataHandleResponse(resp) } // setMetadataCreateRequest creates the SetMetadata request. 
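As an illustrative aside (not part of the vendored change), here is how code inside this module might drive the renamed, exported SetMetadata. The *BlobClient is assumed to come from this file's NewBlobClient(endpoint, pl) constructor, which mirrors the NewBlockBlobClient shown later in the patch; the Metadata field's map[string]string type is taken from the request-builder loop below.

package generated

import "context"

// exampleSetMetadata is an editorial sketch: it sets one user-defined
// metadata pair, which the request builder below emits as an x-ms-meta-env
// header. All other parameter groups are left nil (no lease, CPK, or
// conditional headers).
func exampleSetMetadata(ctx context.Context, client *BlobClient) error {
    opts := &BlobClientSetMetadataOptions{
        Metadata: map[string]string{"env": "prod"},
    }
    _, err := client.SetMetadata(ctx, opts, nil, nil, nil, nil)
    return err
}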
-func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "metadata") - if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetMetadataOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Metadata != nil { - for k, v := range blobClientSetMetadataOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - 
req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetMetadataOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setMetadataHandleResponse handles the SetMetadata response. -func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobClientSetMetadataResponse, error) { - result := blobClientSetMetadataResponse{RawResponse: resp} +func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { + result := BlobClientSetMetadataResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } result.LastModified = &lastModified } @@ -2449,14 +2462,14 @@ func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobCl if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } result.Date = &date } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { - return blobClientSetMetadataResponse{}, err + return BlobClientSetMetadataResponse{}, err } result.IsServerEncrypted = &isServerEncrypted } @@ -2471,65 +2484,64 @@ func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. -// blobClientSetTagsOptions - blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) SetTags(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientSetTagsResponse, error) { - req, err := client.setTagsCreateRequest(ctx, blobClientSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// tags - Blob tags +// options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientSetTagsResponse{}, err + return BlobClientSetTagsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetTagsResponse{}, err + return BlobClientSetTagsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return blobClientSetTagsResponse{}, runtime.NewResponseError(resp) + return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) } return client.setTagsHandleResponse(resp) } // setTagsCreateRequest creates the SetTags request. -func (client *blobClient) setTagsCreateRequest(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTagsOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientSetTagsOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentMD5)) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentCRC64)) + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTagsOptions.RequestID) + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("Accept", "application/xml") - if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Tags != nil { - return req, runtime.MarshalAsXML(req, *blobClientSetTagsOptions.Tags) - } - return req, nil + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, tags) } // setTagsHandleResponse handles the SetTags response. -func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClientSetTagsResponse, error) { - result := blobClientSetTagsResponse{RawResponse: resp} +func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) { + result := BlobClientSetTagsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2542,7 +2554,7 @@ func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClient if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientSetTagsResponse{}, err + return BlobClientSetTagsResponse{}, err } result.Date = &date } @@ -2554,64 +2566,65 @@ func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClient // premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // tier - Indicates the tier to be set on the blob. -// blobClientSetTierOptions - blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *blobClient) SetTier(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetTierResponse, error) { - req, err := client.setTierCreateRequest(ctx, tier, blobClientSetTierOptions, leaseAccessConditions, modifiedAccessConditions) +// options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return blobClientSetTierResponse{}, err + return BlobClientSetTierResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientSetTierResponse{}, err + return BlobClientSetTierResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return blobClientSetTierResponse{}, runtime.NewResponseError(resp) + return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) } return client.setTierHandleResponse(resp) } // setTierCreateRequest creates the SetTier request. -func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tier") - if blobClientSetTierOptions != nil && blobClientSetTierOptions.Snapshot != nil { - reqQP.Set("snapshot", *blobClientSetTierOptions.Snapshot) + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if blobClientSetTierOptions != nil && blobClientSetTierOptions.VersionID != nil { - reqQP.Set("versionid", *blobClientSetTierOptions.VersionID) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if blobClientSetTierOptions != nil && blobClientSetTierOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTierOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-access-tier", string(tier)) - if blobClientSetTierOptions != nil && blobClientSetTierOptions.RehydratePriority != nil { - req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientSetTierOptions.RehydratePriority)) + req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientSetTierOptions != nil && blobClientSetTierOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTierOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags 
!= nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setTierHandleResponse handles the SetTier response. -func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClientSetTierResponse, error) { - result := blobClientSetTierResponse{RawResponse: resp} +func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) { + result := BlobClientSetTierResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2626,119 +2639,119 @@ func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. -// blobClientStartCopyFromURLOptions - blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL +// options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *blobClient) StartCopyFromURL(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientStartCopyFromURLResponse, error) { - req, err := client.startCopyFromURLCreateRequest(ctx, copySource, blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
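Again as an editorial aside, a minimal sketch of the exported SetTier; AccessTierCool is an assumption taken from the generated constants:

package generated

import "context"

// exampleSetTier is an editorial sketch: it moves a blob to the Cool tier.
// Note the success check above accepts both 200 and 202, because rehydrating
// an archived blob completes asynchronously.
func exampleSetTier(ctx context.Context, client *BlobClient) error {
    _, err := client.SetTier(ctx, AccessTierCool, nil, nil, nil)
    return err
}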
+func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return blobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) } return client.startCopyFromURLHandleResponse(resp) } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. -func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientStartCopyFromURLOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Metadata != nil { - for k, v := range blobClientStartCopyFromURLOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*blobClientStartCopyFromURLOptions.Tier)) + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RehydratePriority != nil { - req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientStartCopyFromURLOptions.RehydratePriority)) + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + 
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { - req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header.Set("x-ms-copy-source", copySource) + req.Raw().Header["x-ms-copy-source"] = []string{copySource} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *blobClientStartCopyFromURLOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] 
= []string{*options.RequestID} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *blobClientStartCopyFromURLOptions.BlobTagsString) + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.SealBlob != nil { - req.Raw().Header.Set("x-ms-seal-blob", strconv.FormatBool(*blobClientStartCopyFromURLOptions.SealBlob)) + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode)) + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientStartCopyFromURLOptions.LegalHold)) + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // startCopyFromURLHandleResponse handles the StartCopyFromURL response. -func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (blobClientStartCopyFromURLResponse, error) { - result := blobClientStartCopyFromURLResponse{RawResponse: resp} +func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { + result := BlobClientStartCopyFromURLResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } result.LastModified = &lastModified } @@ -2757,7 +2770,7 @@ func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (b if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientStartCopyFromURLResponse{}, err + return BlobClientStartCopyFromURLResponse{}, err } result.Date = &date } @@ -2772,24 +2785,25 @@ func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (b // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. -// options - blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. 
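One more illustrative sketch (editorial, not vendored): starting an asynchronous server-side copy and capturing the copy ID. The CopyID field is an assumption based on the x-ms-copy-id header this handler family parses in the elided hunks.

package generated

import "context"

// exampleStartCopy is an editorial sketch: srcURL must be publicly readable
// or carry a SAS, per the doc comment above; the returned ID can be passed to
// AbortCopyFromURL while the copy is still pending.
func exampleStartCopy(ctx context.Context, client *BlobClient, srcURL string) (string, error) {
    resp, err := client.StartCopyFromURL(ctx, srcURL, nil, nil, nil, nil)
    if err != nil {
        return "", err
    }
    if resp.CopyID != nil {
        return *resp.CopyID, nil
    }
    return "", nil
}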
-func (client *blobClient) Undelete(ctx context.Context, options *blobClientUndeleteOptions) (blobClientUndeleteResponse, error) { +// Generated from API version 2020-10-02 +// options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { req, err := client.undeleteCreateRequest(ctx, options) if err != nil { - return blobClientUndeleteResponse{}, err + return BlobClientUndeleteResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return blobClientUndeleteResponse{}, err + return BlobClientUndeleteResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return blobClientUndeleteResponse{}, runtime.NewResponseError(resp) + return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) } return client.undeleteHandleResponse(resp) } // undeleteCreateRequest creates the Undelete request. -func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *blobClientUndeleteOptions) (*policy.Request, error) { +func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2800,17 +2814,17 @@ func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // undeleteHandleResponse handles the Undelete response. -func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClientUndeleteResponse, error) { - result := blobClientUndeleteResponse{RawResponse: resp} +func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) { + result := BlobClientUndeleteResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -2823,7 +2837,7 @@ func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClien if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return blobClientUndeleteResponse{}, err + return BlobClientUndeleteResponse{}, err } result.Date = &date } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go new file mode 100644 index 000000000..5f2d11016 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -0,0 +1,960 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// BlockBlobClient contains the methods for the BlockBlob group. +// Don't use this type directly, use NewBlockBlobClient() instead. +type BlockBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { + client := &BlockBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blocks - Blob Blocks. +// options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.commitBlockListHandleResponse(resp) +} + +// commitBlockListCreateRequest creates the CommitBlockList request. 
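The doc comment above describes the two-phase block upload; as a hedged sketch (BlockLookupList's field names are assumptions from the generated models), committing previously staged blocks looks like this:

package generated

import "context"

// exampleCommitBlocks is an editorial sketch: ids must be the exact base64
// block IDs used when the blocks were staged with Put Block; Latest tells the
// service to take each block from whichever list holds its newest version.
func exampleCommitBlocks(ctx context.Context, client *BlockBlobClient, ids []string) error {
    latest := make([]*string, len(ids))
    for i := range ids {
        latest[i] = &ids[i]
    }
    _, err := client.CommitBlockList(ctx, BlockLookupList{Latest: latest}, nil, nil, nil, nil, nil, nil)
    return err
}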
+func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = 
[]string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, blocks) +} + +// commitBlockListHandleResponse handles the CommitBlockList response. +func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { + result := BlockBlobClientCommitBlockListResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := 
resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.getBlockListHandleResponse(resp) +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
+func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { + result := BlockBlobClientGetBlockListResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
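As with the blob-client methods above, a hedged usage sketch, since the parameter list of the generated method that follows is long: contentLength is the length of the request body, which is zero here because the payload is pulled server-side from copySource, and every optional parameter group may be nil.

package generated

import "context"

// examplePutBlobFromURL is an editorial sketch: it creates (or overwrites) a
// block blob synchronously from srcURL, with all optional groups left nil.
func examplePutBlobFromURL(ctx context.Context, client *BlockBlobClient, srcURL string) error {
    _, err := client.PutBlobFromURL(ctx, 0, srcURL, nil, nil, nil, nil, nil, nil, nil)
    return err
}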
+func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.putBlobFromURLHandleResponse(resp) +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = 
[]string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = 
[]string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { + result := BlockBlobClientPutBlobFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// body - Initial data +// options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
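// Example (editor's sketch): staging a single block. Block IDs must be Base64
// strings whose pre-encoding values share one length per blob, so a
// fixed-width counter is the usual pattern; a staged block only becomes
// readable after a later CommitBlockList call. Names are illustrative.
//
//	id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", 0)))
//	_, err := client.StageBlock(ctx, id, size, body, nil, nil, nil, nil)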
+func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobClientStageBlockResponse, error) { + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockHandleResponse(resp) +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// stageBlockHandleResponse handles the StageBlock response. 
+func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { + result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// sourceURL - Specify a URL to the copy source. +// options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
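// Example (editor's sketch): staging a block whose bytes are pulled from a
// remote URL rather than a request body, limited to a byte range via the
// SourceRange option. Names are illustrative.
//
//	rng := "bytes=0-1048575"
//	opts := &BlockBlobClientStageBlockFromURLOptions{SourceRange: &rng}
//	_, err := client.StageBlockFromURL(ctx, id, 0, srcURL, opts, nil, nil, nil, nil)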
+func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockFromURLHandleResponse(resp) +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + 
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { + result := BlockBlobClientStageBlockFromURLResponse{} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. 
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + } + return client.uploadHandleResponse(resp) +} + +// uploadCreateRequest creates the Upload request. +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadHandleResponse handles the Upload response. 
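// Example (editor's sketch): a single-shot upload; per the Upload doc comment
// above, it overwrites any existing content and metadata. streaming.NopCloser
// (from azcore/streaming) adapts a bytes.Reader to the io.ReadSeekCloser the
// method expects. Names are illustrative.
//
//	body := streaming.NopCloser(bytes.NewReader(buf))
//	_, err := client.Upload(ctx, int64(len(buf)), body, nil, nil, nil, nil, nil, nil)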
+func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { + result := BlockBlobClientUploadResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go similarity index 84% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 2348df04a..74e6cf1e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -5,15 +5,10 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated -const ( - moduleName = "azblob" - moduleVersion = "v0.4.1" -) - -// AccessTier enum type AccessTier string const ( @@ -31,6 +26,7 @@ const ( AccessTierP60 AccessTier = "P60" AccessTierP70 AccessTier = "P70" AccessTierP80 AccessTier = "P80" + AccessTierPremium AccessTier = "Premium" ) // PossibleAccessTierValues returns the possible values for the AccessTier const type. @@ -50,15 +46,10 @@ func PossibleAccessTierValues() []AccessTier { AccessTierP60, AccessTierP70, AccessTierP80, + AccessTierPremium, } } -// ToPtr returns a *AccessTier pointing to the current value. -func (c AccessTier) ToPtr() *AccessTier { - return &c -} - -// AccountKind enum type AccountKind string const ( @@ -80,12 +71,6 @@ func PossibleAccountKindValues() []AccountKind { } } -// ToPtr returns a *AccountKind pointing to the current value. 
-func (c AccountKind) ToPtr() *AccountKind { - return &c -} - -// ArchiveStatus enum type ArchiveStatus string const ( @@ -101,36 +86,6 @@ func PossibleArchiveStatusValues() []ArchiveStatus { } } -// ToPtr returns a *ArchiveStatus pointing to the current value. -func (c ArchiveStatus) ToPtr() *ArchiveStatus { - return &c -} - -// BlobExpiryOptions enum -type BlobExpiryOptions string - -const ( - BlobExpiryOptionsAbsolute BlobExpiryOptions = "Absolute" - BlobExpiryOptionsNeverExpire BlobExpiryOptions = "NeverExpire" - BlobExpiryOptionsRelativeToCreation BlobExpiryOptions = "RelativeToCreation" - BlobExpiryOptionsRelativeToNow BlobExpiryOptions = "RelativeToNow" -) - -// PossibleBlobExpiryOptionsValues returns the possible values for the BlobExpiryOptions const type. -func PossibleBlobExpiryOptionsValues() []BlobExpiryOptions { - return []BlobExpiryOptions{ - BlobExpiryOptionsAbsolute, - BlobExpiryOptionsNeverExpire, - BlobExpiryOptionsRelativeToCreation, - BlobExpiryOptionsRelativeToNow, - } -} - -// ToPtr returns a *BlobExpiryOptions pointing to the current value. -func (c BlobExpiryOptions) ToPtr() *BlobExpiryOptions { - return &c -} - // BlobGeoReplicationStatus - The status of the secondary location type BlobGeoReplicationStatus string @@ -149,35 +104,6 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { } } -// ToPtr returns a *BlobGeoReplicationStatus pointing to the current value. -func (c BlobGeoReplicationStatus) ToPtr() *BlobGeoReplicationStatus { - return &c -} - -// BlobImmutabilityPolicyMode enum -type BlobImmutabilityPolicyMode string - -const ( - BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyMode = "Mutable" - BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyMode = "Unlocked" - BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyMode = "Locked" -) - -// PossibleBlobImmutabilityPolicyModeValues returns the possible values for the BlobImmutabilityPolicyMode const type. -func PossibleBlobImmutabilityPolicyModeValues() []BlobImmutabilityPolicyMode { - return []BlobImmutabilityPolicyMode{ - BlobImmutabilityPolicyModeMutable, - BlobImmutabilityPolicyModeUnlocked, - BlobImmutabilityPolicyModeLocked, - } -} - -// ToPtr returns a *BlobImmutabilityPolicyMode pointing to the current value. -func (c BlobImmutabilityPolicyMode) ToPtr() *BlobImmutabilityPolicyMode { - return &c -} - -// BlobType enum type BlobType string const ( @@ -195,12 +121,6 @@ func PossibleBlobTypeValues() []BlobType { } } -// ToPtr returns a *BlobType pointing to the current value. -func (c BlobType) ToPtr() *BlobType { - return &c -} - -// BlockListType enum type BlockListType string const ( @@ -218,12 +138,6 @@ func PossibleBlockListTypeValues() []BlockListType { } } -// ToPtr returns a *BlockListType pointing to the current value. -func (c BlockListType) ToPtr() *BlockListType { - return &c -} - -// CopyStatusType enum type CopyStatusType string const ( @@ -243,12 +157,6 @@ func PossibleCopyStatusTypeValues() []CopyStatusType { } } -// ToPtr returns a *CopyStatusType pointing to the current value. -func (c CopyStatusType) ToPtr() *CopyStatusType { - return &c -} - -// DeleteSnapshotsOptionType enum type DeleteSnapshotsOptionType string const ( @@ -264,12 +172,21 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { } } -// ToPtr returns a *DeleteSnapshotsOptionType pointing to the current value. 
-func (c DeleteSnapshotsOptionType) ToPtr() *DeleteSnapshotsOptionType { - return &c +type DeleteType string + +const ( + DeleteTypeNone DeleteType = "None" + DeleteTypePermanent DeleteType = "Permanent" +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return []DeleteType{ + DeleteTypeNone, + DeleteTypePermanent, + } } -// EncryptionAlgorithmType enum type EncryptionAlgorithmType string const ( @@ -285,12 +202,57 @@ func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { } } -// ToPtr returns a *EncryptionAlgorithmType pointing to the current value. -func (c EncryptionAlgorithmType) ToPtr() *EncryptionAlgorithmType { - return &c +type ExpiryOptions string + +const ( + ExpiryOptionsAbsolute ExpiryOptions = "Absolute" + ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" + ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" + ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return []ExpiryOptions{ + ExpiryOptionsAbsolute, + ExpiryOptionsNeverExpire, + ExpiryOptionsRelativeToCreation, + ExpiryOptionsRelativeToNow, + } +} + +type ImmutabilityPolicyMode string + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeMutable, + ImmutabilityPolicyModeUnlocked, + ImmutabilityPolicyModeLocked, + } +} + +type ImmutabilityPolicySetting string + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return []ImmutabilityPolicySetting{ + ImmutabilityPolicySettingUnlocked, + ImmutabilityPolicySettingLocked, + } } -// LeaseDurationType enum type LeaseDurationType string const ( @@ -306,12 +268,6 @@ func PossibleLeaseDurationTypeValues() []LeaseDurationType { } } -// ToPtr returns a *LeaseDurationType pointing to the current value. -func (c LeaseDurationType) ToPtr() *LeaseDurationType { - return &c -} - -// LeaseStateType enum type LeaseStateType string const ( @@ -333,12 +289,6 @@ func PossibleLeaseStateTypeValues() []LeaseStateType { } } -// ToPtr returns a *LeaseStateType pointing to the current value. -func (c LeaseStateType) ToPtr() *LeaseStateType { - return &c -} - -// LeaseStatusType enum type LeaseStatusType string const ( @@ -354,12 +304,6 @@ func PossibleLeaseStatusTypeValues() []LeaseStatusType { } } -// ToPtr returns a *LeaseStatusType pointing to the current value. -func (c LeaseStatusType) ToPtr() *LeaseStatusType { - return &c -} - -// ListBlobsIncludeItem enum type ListBlobsIncludeItem string const ( @@ -391,12 +335,6 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { } } -// ToPtr returns a *ListBlobsIncludeItem pointing to the current value. 
-func (c ListBlobsIncludeItem) ToPtr() *ListBlobsIncludeItem { - return &c -} - -// ListContainersIncludeType enum type ListContainersIncludeType string const ( @@ -414,12 +352,6 @@ func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { } } -// ToPtr returns a *ListContainersIncludeType pointing to the current value. -func (c ListContainersIncludeType) ToPtr() *ListContainersIncludeType { - return &c -} - -// PremiumPageBlobAccessTier enum type PremiumPageBlobAccessTier string const ( @@ -453,12 +385,6 @@ func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { } } -// ToPtr returns a *PremiumPageBlobAccessTier pointing to the current value. -func (c PremiumPageBlobAccessTier) ToPtr() *PremiumPageBlobAccessTier { - return &c -} - -// PublicAccessType enum type PublicAccessType string const ( @@ -474,11 +400,6 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { } } -// ToPtr returns a *PublicAccessType pointing to the current value. -func (c PublicAccessType) ToPtr() *PublicAccessType { - return &c -} - // QueryFormatType - The quick query format type. type QueryFormatType string @@ -499,11 +420,6 @@ func PossibleQueryFormatTypeValues() []QueryFormatType { } } -// ToPtr returns a *QueryFormatType pointing to the current value. -func (c QueryFormatType) ToPtr() *QueryFormatType { - return &c -} - // RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. // Valid values are High and Standard. type RehydratePriority string @@ -521,12 +437,6 @@ func PossibleRehydratePriorityValues() []RehydratePriority { } } -// ToPtr returns a *RehydratePriority pointing to the current value. -func (c RehydratePriority) ToPtr() *RehydratePriority { - return &c -} - -// SKUName enum type SKUName string const ( @@ -548,12 +458,6 @@ func PossibleSKUNameValues() []SKUName { } } -// ToPtr returns a *SKUName pointing to the current value. -func (c SKUName) ToPtr() *SKUName { - return &c -} - -// SequenceNumberActionType enum type SequenceNumberActionType string const ( @@ -571,11 +475,6 @@ func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { } } -// ToPtr returns a *SequenceNumberActionType pointing to the current value. 
-func (c SequenceNumberActionType) ToPtr() *SequenceNumberActionType { - return &c -} - // StorageErrorCode - Error codes returned by the service type StorageErrorCode string @@ -616,7 +515,7 @@ const ( StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" - StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" @@ -734,7 +633,7 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode { StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, - StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, @@ -813,29 +712,3 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode { StorageErrorCodeUnsupportedXMLNode, } } - -// ToPtr returns a *StorageErrorCode pointing to the current value. -func (c StorageErrorCode) ToPtr() *StorageErrorCode { - return &c -} - -// BlobDeleteType enum -type BlobDeleteType string - -const ( - BlobDeleteTypeNone BlobDeleteType = "None" - BlobDeleteTypePermanent BlobDeleteType = "Permanent" -) - -// PossibleBlobDeleteTypeValues returns the possible values for the BlobDeleteType const type. -func PossibleBlobDeleteTypeValues() []BlobDeleteType { - return []BlobDeleteType{ - BlobDeleteTypeNone, - BlobDeleteTypePermanent, - } -} - -// ToPtr returns a *BlobDeleteType pointing to the current value. -func (c BlobDeleteType) ToPtr() *BlobDeleteType { - return &c -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go similarity index 50% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index c9245ce10..31e671f14 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -5,13 +5,15 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
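// Editorial note on the constants hunks above: every per-type ToPtr helper is
// deleted. Callers migrate to the generic pointer helper that azcore ships
// (a sketch, assuming the azcore/to package):
//
//	import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//
//	tier := to.Ptr(AccessTierHot) // replaces AccessTierHot.ToPtr()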
-package azblob +package generated import ( "context" "encoding/xml" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" @@ -21,16 +23,18 @@ import ( "time" ) -type containerClient struct { +// ContainerClient contains the methods for the Container group. +// Don't use this type directly, use NewContainerClient() instead. +type ContainerClient struct { endpoint string pl runtime.Pipeline } -// newContainerClient creates a new instance of containerClient with the specified values. +// NewContainerClient creates a new instance of ContainerClient with the specified values. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // pl - the pipeline used for sending requests and handling responses. -func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient { - client := &containerClient{ +func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { + client := &ContainerClient{ endpoint: endpoint, pl: pl, } @@ -40,26 +44,26 @@ func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient { // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// containerClientAcquireLeaseOptions - containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) AcquireLease(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, containerClientAcquireLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) AcquireLease(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return containerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) } return client.acquireLeaseHandleResponse(resp) } // acquireLeaseCreateRequest creates the AcquireLease request. 
-func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -67,41 +71,41 @@ func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, co reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "acquire") - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Duration != nil { - req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Duration), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + if options != nil && options.Duration != nil { + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} } - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.ProposedLeaseID != nil { - req.Raw().Header.Set("x-ms-proposed-lease-id", *containerClientAcquireLeaseOptions.ProposedLeaseID) + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientAcquireLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // acquireLeaseHandleResponse handles the AcquireLease response. 
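// Example (editor's sketch): acquiring a 15-second container lease. Duration
// is in seconds, with -1 meaning infinite; the response's LeaseID is what
// later renew/release/break calls must present. Names are illustrative.
//
//	duration := int32(15)
//	opts := &ContainerClientAcquireLeaseOptions{Duration: &duration}
//	resp, err := client.AcquireLease(ctx, opts, nil)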
-func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (containerClientAcquireLeaseResponse, error) { - result := containerClientAcquireLeaseResponse{RawResponse: resp} +func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { + result := ContainerClientAcquireLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } result.LastModified = &lastModified } @@ -120,7 +124,7 @@ func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientAcquireLeaseResponse{}, err + return ContainerClientAcquireLeaseResponse{}, err } result.Date = &date } @@ -130,26 +134,26 @@ func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) ( // BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// containerClientBreakLeaseOptions - containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) BreakLease(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientBreakLeaseResponse, error) { - req, err := client.breakLeaseCreateRequest(ctx, containerClientBreakLeaseOptions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return containerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) } return client.breakLeaseHandleResponse(resp) } // breakLeaseCreateRequest creates the BreakLease request. 
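// Editorial note on the header rewrites in these hunks: http.Header.Set
// canonicalizes key names (textproto style, "x-ms-version" becomes
// "X-Ms-Version"), whereas direct map assignment stores the key verbatim, so
// the switch keeps the lowercase x-ms-* names exactly as the service spec
// writes them. A sketch of the difference:
//
//	h := http.Header{}
//	h.Set("x-ms-version", "2020-10-02")        // stored under "X-Ms-Version"
//	h["x-ms-version"] = []string{"2020-10-02"} // stored under "x-ms-version"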
-func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -157,38 +161,38 @@ func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, cont reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "break") - if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.BreakPeriod != nil { - req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.BreakPeriod), 10)) + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientBreakLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // breakLeaseHandleResponse handles the BreakLease response. 
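// Example (editor's sketch): breaking a lease with a grace period. BreakPeriod
// is in seconds; the response's LeaseTime reports the seconds remaining until
// the break completes. Names are illustrative.
//
//	period := int32(10)
//	opts := &ContainerClientBreakLeaseOptions{BreakPeriod: &period}
//	resp, err := client.BreakLease(ctx, opts, nil)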
-func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (containerClientBreakLeaseResponse, error) { - result := containerClientBreakLeaseResponse{RawResponse: resp} +func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { + result := ContainerClientBreakLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } result.LastModified = &lastModified } @@ -196,7 +200,7 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (co leaseTime32, err := strconv.ParseInt(val, 10, 32) leaseTime := int32(leaseTime32) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } result.LeaseTime = &leaseTime } @@ -212,7 +216,7 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (co if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientBreakLeaseResponse{}, err + return ContainerClientBreakLeaseResponse{}, err } result.Date = &date } @@ -222,30 +226,30 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (co // ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. // proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID // string formats. -// containerClientChangeLeaseOptions - containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientChangeLeaseResponse, error) { - req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, containerClientChangeLeaseOptions, modifiedAccessConditions) +// options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
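The response handlers above now type the ETag header as *azcore.ETag rather than *string. azcore defines ETag as a plain string type, which is what makes the (*azcore.ETag)(&val) pointer conversion legal with no copy. A standalone sketch, with a local ETag type mirroring azcore's definition:

    package main

    import "fmt"

    // ETag mirrors azcore.ETag, which is defined as a string type;
    // that definition is what makes the pointer conversion below legal.
    type ETag string

    func main() {
        val := `"0x8D4BCC2E97CB1A1"`
        // Converting *string to *ETag reinterprets the pointer without
        // copying the header value, just as the handlers above do with
        // (*azcore.ETag)(&val).
        etag := (*ETag)(&val)
        fmt.Println(*etag)
    }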
+func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) } return client.changeLeaseHandleResponse(resp) } // changeLeaseCreateRequest creates the ChangeLease request. -func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -253,37 +257,37 @@ func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, lea reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientChangeLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "change") - req.Raw().Header.Set("x-ms-lease-id", leaseID) - req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientChangeLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - 
req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // changeLeaseHandleResponse handles the ChangeLease response. -func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (containerClientChangeLeaseResponse, error) { - result := containerClientChangeLeaseResponse{RawResponse: resp} +func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { + result := ContainerClientChangeLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } result.LastModified = &lastModified } @@ -302,7 +306,7 @@ func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (c if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientChangeLeaseResponse{}, err + return ContainerClientChangeLeaseResponse{}, err } result.Date = &date } @@ -312,68 +316,68 @@ func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (c // Create - creates a new container under the specified account. If the container with the same name already exists, the operation // fails // If the operation fails it returns an *azcore.ResponseError type. -// containerClientCreateOptions - containerClientCreateOptions contains the optional parameters for the containerClient.Create -// method. -// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. -func (client *containerClient) Create(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (containerClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, containerClientCreateOptions, containerCpkScopeInfo) +// Generated from API version 2020-10-02 +// options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (ContainerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options, containerCpkScopeInfo) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return containerClientCreateResponse{}, runtime.NewResponseError(resp) + return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) } return client.createHandleResponse(resp) } // createCreateRequest creates the Create request. 
-func (client *containerClient) createCreateRequest(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") - if containerClientCreateOptions != nil && containerClientCreateOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientCreateOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if containerClientCreateOptions != nil && containerClientCreateOptions.Metadata != nil { - for k, v := range containerClientCreateOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } - if containerClientCreateOptions != nil && containerClientCreateOptions.Access != nil { - req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientCreateOptions.Access)) + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientCreateOptions != nil && containerClientCreateOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientCreateOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { - req.Raw().Header.Set("x-ms-default-encryption-scope", *containerCpkScopeInfo.DefaultEncryptionScope) + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCpkScopeInfo.DefaultEncryptionScope} } if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { - req.Raw().Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)) + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // createHandleResponse handles the Create response. 
-func (client *containerClient) createHandleResponse(resp *http.Response) (containerClientCreateResponse, error) { - result := containerClientCreateResponse{RawResponse: resp} +func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { + result := ContainerClientCreateResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } result.LastModified = &lastModified } @@ -389,7 +393,7 @@ func (client *containerClient) createHandleResponse(resp *http.Response) (contai if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientCreateResponse{}, err + return ContainerClientCreateResponse{}, err } result.Date = &date } @@ -399,57 +403,57 @@ func (client *containerClient) createHandleResponse(resp *http.Response) (contai // Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. -// containerClientDeleteOptions - containerClientDeleteOptions contains the optional parameters for the containerClient.Delete -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) Delete(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientDeleteResponse, error) { - req, err := client.deleteCreateRequest(ctx, containerClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return containerClientDeleteResponse{}, err + return ContainerClientDeleteResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientDeleteResponse{}, err + return ContainerClientDeleteResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return containerClientDeleteResponse{}, runtime.NewResponseError(resp) + return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) } return client.deleteHandleResponse(resp) } // deleteCreateRequest creates the Delete request. 
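The handlers also no longer seed the result with RawResponse: resp; the response structs in this release drop that field. Callers that still need the underlying *http.Response capture it through the request context instead. A sketch assuming the vendored azcore exposes runtime.WithCaptureResponse as the replacement; doCreate is a stand-in for any of the generated methods, not part of this API:

    package example

    import (
        "context"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    // createAndInspect captures the final *http.Response that the removed
    // RawResponse field used to carry. doCreate stands in for a generated
    // method such as ContainerClient.Create.
    func createAndInspect(doCreate func(context.Context) error) (*http.Response, error) {
        var rawResp *http.Response
        ctx := runtime.WithCaptureResponse(context.Background(), &rawResp)
        if err := doCreate(ctx); err != nil {
            return nil, err
        }
        return rawResp, nil
    }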
-func (client *containerClient) deleteCreateRequest(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") - if containerClientDeleteOptions != nil && containerClientDeleteOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientDeleteOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientDeleteOptions != nil && containerClientDeleteOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientDeleteOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // deleteHandleResponse handles the Delete response. -func (client *containerClient) deleteHandleResponse(resp *http.Response) (containerClientDeleteResponse, error) { - result := containerClientDeleteResponse{RawResponse: resp} +func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { + result := ContainerClientDeleteResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -462,7 +466,7 @@ func (client *containerClient) deleteHandleResponse(resp *http.Response) (contai if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientDeleteResponse{}, err + return ContainerClientDeleteResponse{}, err } result.Date = &date } @@ -472,26 +476,27 @@ func (client *containerClient) deleteHandleResponse(resp *http.Response) (contai // GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. 
-// containerClientGetAccessPolicyOptions - containerClientGetAccessPolicyOptions contains the optional parameters for the -// containerClient.GetAccessPolicy method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *containerClient) GetAccessPolicy(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetAccessPolicyResponse, error) { - req, err := client.getAccessPolicyCreateRequest(ctx, containerClientGetAccessPolicyOptions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) } return client.getAccessPolicyHandleResponse(resp) } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. 
-func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -499,34 +504,34 @@ func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") reqQP.Set("comp", "acl") - if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetAccessPolicyOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetAccessPolicyOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccessPolicyHandleResponse handles the GetAccessPolicy response. -func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response) (containerClientGetAccessPolicyResponse, error) { - result := containerClientGetAccessPolicyResponse{RawResponse: resp} +func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { + result := ContainerClientGetAccessPolicyResponse{} if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } result.LastModified = &lastModified } @@ -542,37 +547,38 @@ func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { - return containerClientGetAccessPolicyResponse{}, err + return ContainerClientGetAccessPolicyResponse{}, err } return result, nil } // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. 
-// options - containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo +// Generated from API version 2020-10-02 +// options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. -func (client *containerClient) GetAccountInfo(ctx context.Context, options *containerClientGetAccountInfoOptions) (containerClientGetAccountInfoResponse, error) { +func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { - return containerClientGetAccountInfoResponse{}, err + return ContainerClientGetAccountInfoResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientGetAccountInfoResponse{}, err + return ContainerClientGetAccountInfoResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) } return client.getAccountInfoHandleResponse(resp) } // getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, options *containerClientGetAccountInfoOptions) (*policy.Request, error) { +func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -581,14 +587,14 @@ func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. -func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) (containerClientGetAccountInfoResponse, error) { - result := containerClientGetAccountInfoResponse{RawResponse: resp} +func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { + result := ContainerClientGetAccountInfoResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -601,7 +607,7 @@ func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetAccountInfoResponse{}, err + return ContainerClientGetAccountInfoResponse{}, err } result.Date = &date } @@ -617,50 +623,50 @@ func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) // GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. -// containerClientGetPropertiesOptions - containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties -// method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -func (client *containerClient) GetProperties(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetPropertiesResponse, error) { - req, err := client.getPropertiesCreateRequest(ctx, containerClientGetPropertiesOptions, leaseAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.getPropertiesHandleResponse(resp) } // getPropertiesCreateRequest creates the GetProperties request. -func (client *containerClient) getPropertiesCreateRequest(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") - if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetPropertiesOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetPropertiesOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. 
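The GetProperties handler in the next hunk rebuilds result.Metadata by scanning every response header for the x-ms-meta- prefix, comparing case-insensitively because net/http canonicalizes stored keys. A standalone sketch of that extraction (metadataFromHeaders is our name for it, not the generated one):

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    const metaPrefix = "x-ms-meta-"

    // metadataFromHeaders mirrors the prefix scan in the handler below:
    // any header whose name starts with x-ms-meta- (case-insensitively)
    // becomes a metadata entry keyed by the remainder of the name.
    func metadataFromHeaders(h http.Header) map[string]string {
        md := map[string]string{}
        for hh := range h {
            if len(hh) > len(metaPrefix) && strings.EqualFold(hh[:len(metaPrefix)], metaPrefix) {
                md[hh[len(metaPrefix):]] = h.Get(hh)
            }
        }
        return md
    }

    func main() {
        h := http.Header{}
        h.Set("x-ms-meta-owner", "friedrich") // stored canonicalized as X-Ms-Meta-Owner
        h.Set("Content-Type", "application/xml")
        fmt.Println(metadataFromHeaders(h)) // map[Owner:friedrich]
    }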
-func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) (containerClientGetPropertiesResponse, error) { - result := containerClientGetPropertiesResponse{RawResponse: resp} +func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { + result := ContainerClientGetPropertiesResponse{} for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { @@ -670,12 +676,12 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) } } if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.LastModified = &lastModified } @@ -700,7 +706,7 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.Date = &date } @@ -710,14 +716,14 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.HasImmutabilityPolicy = &hasImmutabilityPolicy } if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { hasLegalHold, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.HasLegalHold = &hasLegalHold } @@ -727,38 +733,27 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { denyEncryptionScopeOverride, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride } if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { - return containerClientGetPropertiesResponse{}, err + return ContainerClientGetPropertiesResponse{}, err } result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } return result, nil } -// ListBlobFlatSegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // If the operation fails it returns an *azcore.ResponseError type. -// options - containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment +// Generated from API version 2020-10-02 +// options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment // method. 
-func (client *containerClient) ListBlobFlatSegment(options *containerClientListBlobFlatSegmentOptions) *containerClientListBlobFlatSegmentPager { - return &containerClientListBlobFlatSegmentPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.listBlobFlatSegmentCreateRequest(ctx, options) - }, - advancer: func(ctx context.Context, resp containerClientListBlobFlatSegmentResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsFlatSegmentResponse.NextMarker) - }, - } -} - // listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. -func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Context, options *containerClientListBlobFlatSegmentOptions) (*policy.Request, error) { +func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -782,17 +777,17 @@ func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. -func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Response) (containerClientListBlobFlatSegmentResponse, error) { - result := containerClientListBlobFlatSegmentResponse{RawResponse: resp} +func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { + result := ContainerClientListBlobFlatSegmentResponse{} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } @@ -808,37 +803,54 @@ func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Resp if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientListBlobFlatSegmentResponse{}, err + return ContainerClientListBlobFlatSegmentResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { - return containerClientListBlobFlatSegmentResponse{}, err + return ContainerClientListBlobFlatSegmentResponse{}, err } return result, nil } -// ListBlobHierarchySegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. 
The delimiter may be a single character or a string. -// options - containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment +// options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment // method. -func (client *containerClient) ListBlobHierarchySegment(delimiter string, options *containerClientListBlobHierarchySegmentOptions) *containerClientListBlobHierarchySegmentPager { - return &containerClientListBlobHierarchySegmentPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, options) +func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { + return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ + More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 }, - advancer: func(ctx context.Context, resp containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsHierarchySegmentResponse.NextMarker) + Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) + } + return client.ListBlobHierarchySegmentHandleResponse(resp) }, - } + }) } -// listBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. -func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *containerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { +// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. 
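The hand-written containerClientListBlobHierarchySegmentPager is gone; the constructor above now returns azcore's generic *runtime.Pager, driven by two callbacks: More reports whether the last page advertises a NextMarker, and Fetcher is invoked with nil for the first page and with the previous page afterwards, issuing the follow-up GET against *page.NextMarker. A minimal self-contained re-creation of that contract (types and names here are illustrative, not the azcore API):

    package main

    import (
        "context"
        "fmt"
    )

    // pagingHandler mirrors the More/Fetcher contract that runtime.NewPager
    // consumes above; the names are illustrative only.
    type pagingHandler[T any] struct {
        more    func(T) bool
        fetcher func(ctx context.Context, prev *T) (T, error)
    }

    type pager[T any] struct {
        h       pagingHandler[T]
        current *T
    }

    // More is true before the first fetch, then defers to the handler.
    func (p *pager[T]) More() bool {
        if p.current == nil {
            return true
        }
        return p.h.more(*p.current)
    }

    // NextPage passes nil on the first call and the previous page after,
    // the same shape as the Fetcher in the generated pager above.
    func (p *pager[T]) NextPage(ctx context.Context) (T, error) {
        page, err := p.h.fetcher(ctx, p.current)
        if err != nil {
            var zero T
            return zero, err
        }
        p.current = &page
        return page, nil
    }

    type listResponse struct {
        Items      []string
        NextMarker *string
    }

    func main() {
        data := [][]string{{"a", "b"}, {"c"}}
        idx := 0
        p := &pager[listResponse]{h: pagingHandler[listResponse]{
            more: func(r listResponse) bool { return r.NextMarker != nil && *r.NextMarker != "" },
            // The real Fetcher issues an HTTP GET against *prev.NextMarker;
            // here successive slices of data stand in for service pages.
            fetcher: func(ctx context.Context, prev *listResponse) (listResponse, error) {
                resp := listResponse{Items: data[idx]}
                if idx+1 < len(data) {
                    marker := fmt.Sprintf("page-%d", idx+1)
                    resp.NextMarker = &marker
                }
                idx++
                return resp, nil
            },
        }}
        for p.More() {
            page, err := p.NextPage(context.Background())
            if err != nil {
                panic(err)
            }
            fmt.Println(page.Items) // [a b] then [c]
        }
    }

Callers drain the generated pager with the same for p.More() { p.NextPage(ctx) } loop, which is also how the public container package consumes it.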
+func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -863,17 +875,17 @@ func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } -// listBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. -func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http.Response) (containerClientListBlobHierarchySegmentResponse, error) { - result := containerClientListBlobHierarchySegmentResponse{RawResponse: resp} +// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. +func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { + result := ContainerClientListBlobHierarchySegmentResponse{} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } @@ -889,12 +901,12 @@ func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientListBlobHierarchySegmentResponse{}, err + return ContainerClientListBlobHierarchySegmentResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { - return containerClientListBlobHierarchySegmentResponse{}, err + return ContainerClientListBlobHierarchySegmentResponse{}, err } return result, nil } @@ -902,27 +914,27 @@ func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http // ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// containerClientReleaseLeaseOptions - containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) ReleaseLease(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientReleaseLeaseResponse, error) { - req, err := client.releaseLeaseCreateRequest(ctx, leaseID, containerClientReleaseLeaseOptions, modifiedAccessConditions) +// options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) } return client.releaseLeaseHandleResponse(resp) } // releaseLeaseCreateRequest creates the ReleaseLease request. -func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -930,36 +942,36 @@ func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, le reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientReleaseLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "release") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientReleaseLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = 
[]string{"application/xml"} return req, nil } // releaseLeaseHandleResponse handles the ReleaseLease response. -func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (containerClientReleaseLeaseResponse, error) { - result := containerClientReleaseLeaseResponse{RawResponse: resp} +func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { + result := ContainerClientReleaseLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } result.LastModified = &lastModified } @@ -975,7 +987,7 @@ func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientReleaseLeaseResponse{}, err + return ContainerClientReleaseLeaseResponse{}, err } result.Date = &date } @@ -984,25 +996,26 @@ func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) ( // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // sourceContainerName - Required. Specifies the name of the container to rename. -// options - containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. -func (client *containerClient) Rename(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (containerClientRenameResponse, error) { +// options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { - return containerClientRenameResponse{}, err + return ContainerClientRenameResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientRenameResponse{}, err + return ContainerClientRenameResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientRenameResponse{}, runtime.NewResponseError(resp) + return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) } return client.renameHandleResponse(resp) } // renameCreateRequest creates the Rename request. 
-func (client *containerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (*policy.Request, error) { +func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1014,21 +1027,21 @@ func (client *containerClient) renameCreateRequest(ctx context.Context, sourceCo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("x-ms-source-container-name", sourceContainerName) + req.Raw().Header["x-ms-source-container-name"] = []string{sourceContainerName} if options != nil && options.SourceLeaseID != nil { - req.Raw().Header.Set("x-ms-source-lease-id", *options.SourceLeaseID) + req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // renameHandleResponse handles the Rename response. -func (client *containerClient) renameHandleResponse(resp *http.Response) (containerClientRenameResponse, error) { - result := containerClientRenameResponse{RawResponse: resp} +func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) { + result := ContainerClientRenameResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1041,7 +1054,7 @@ func (client *containerClient) renameHandleResponse(resp *http.Response) (contai if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRenameResponse{}, err + return ContainerClientRenameResponse{}, err } result.Date = &date } @@ -1051,27 +1064,27 @@ func (client *containerClient) renameHandleResponse(resp *http.Response) (contai // RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // leaseID - Specifies the current lease ID on the resource. -// containerClientRenewLeaseOptions - containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) RenewLease(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientRenewLeaseResponse, error) { - req, err := client.renewLeaseCreateRequest(ctx, leaseID, containerClientRenewLeaseOptions, modifiedAccessConditions) +// options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) } return client.renewLeaseHandleResponse(resp) } // renewLeaseCreateRequest creates the RenewLease request. -func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1079,36 +1092,36 @@ func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leas reqQP := req.Raw().URL.Query() reqQP.Set("comp", "lease") reqQP.Set("restype", "container") - if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientRenewLeaseOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-lease-action", "renew") - req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientRenewLeaseOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // 
renewLeaseHandleResponse handles the RenewLease response. -func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (containerClientRenewLeaseResponse, error) { - result := containerClientRenewLeaseResponse{RawResponse: resp} +func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { + result := ContainerClientRenewLeaseResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } result.LastModified = &lastModified } @@ -1127,7 +1140,7 @@ func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (co if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRenewLeaseResponse{}, err + return ContainerClientRenewLeaseResponse{}, err } result.Date = &date } @@ -1136,24 +1149,25 @@ func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (co // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. -// options - containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. -func (client *containerClient) Restore(ctx context.Context, options *containerClientRestoreOptions) (containerClientRestoreResponse, error) { +// Generated from API version 2020-10-02 +// options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { req, err := client.restoreCreateRequest(ctx, options) if err != nil { - return containerClientRestoreResponse{}, err + return ContainerClientRestoreResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientRestoreResponse{}, err + return ContainerClientRestoreResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusCreated) { - return containerClientRestoreResponse{}, runtime.NewResponseError(resp) + return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) } return client.restoreHandleResponse(resp) } // restoreCreateRequest creates the Restore request. 
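// --- Illustrative usage sketch; not part of the generated code -----------
// Every operation on this client follows the same three-step shape seen
// above: <op>CreateRequest builds the *policy.Request, client.pl.Do runs
// the pipeline, and <op>HandleResponse maps response headers into the
// typed result. Assuming a constructed *ContainerClient and an existing
// lease ID, renewing a container lease reduces to a single call:
func renewContainerLeaseExample(ctx context.Context, client *ContainerClient, leaseID string) error {
	// nil options and nil access conditions are valid; the request builder
	// then omits every optional query parameter and conditional header.
	_, err := client.RenewLease(ctx, leaseID, nil, nil)
	return err
}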
-func (client *containerClient) restoreCreateRequest(ctx context.Context, options *containerClientRestoreOptions) (*policy.Request, error) { +func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1165,23 +1179,23 @@ func (client *containerClient) restoreCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.DeletedContainerName != nil { - req.Raw().Header.Set("x-ms-deleted-container-name", *options.DeletedContainerName) + req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedContainerName} } if options != nil && options.DeletedContainerVersion != nil { - req.Raw().Header.Set("x-ms-deleted-container-version", *options.DeletedContainerVersion) + req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // restoreHandleResponse handles the Restore response. -func (client *containerClient) restoreHandleResponse(resp *http.Response) (containerClientRestoreResponse, error) { - result := containerClientRestoreResponse{RawResponse: resp} +func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) { + result := ContainerClientRestoreResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -1194,7 +1208,7 @@ func (client *containerClient) restoreHandleResponse(resp *http.Response) (conta if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientRestoreResponse{}, err + return ContainerClientRestoreResponse{}, err } result.Date = &date } @@ -1204,27 +1218,29 @@ func (client *containerClient) restoreHandleResponse(resp *http.Response) (conta // SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. -// containerClientSetAccessPolicyOptions - containerClientSetAccessPolicyOptions contains the optional parameters for the -// containerClient.SetAccessPolicy method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *containerClient) SetAccessPolicy(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetAccessPolicyResponse, error) { - req, err := client.setAccessPolicyCreateRequest(ctx, containerClientSetAccessPolicyOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// containerACL - the acls for the container +// options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) } return client.setAccessPolicyHandleResponse(resp) } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. 
-func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1232,47 +1248,44 @@ func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") reqQP.Set("comp", "acl") - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetAccessPolicyOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Access != nil { - req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientSetAccessPolicyOptions.Access)) + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetAccessPolicyOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} type wrapper struct { XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` } - if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.ContainerACL != nil { - return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerClientSetAccessPolicyOptions.ContainerACL}) - } - return req, nil + return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) } // setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
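// --- Illustrative usage sketch; not part of the generated code -----------
// This hunk changes SetAccessPolicy's shape: the ACL moves from an optional
// field on the options struct to a required containerACL parameter, so the
// <SignedIdentifiers> XML body is now always marshaled. A minimal caller,
// assuming SignedIdentifier carries ID/AccessPolicy pointer fields as
// declared in this package's models; the policy values are examples only.
func setAccessPolicyExample(ctx context.Context, client *ContainerClient) error {
	start := time.Now().UTC()
	expiry := start.Add(48 * time.Hour)
	id, permission := "example-policy", "r" // "r" = read, per Blob SAS permission letters
	acl := []*SignedIdentifier{{
		ID:           &id,
		AccessPolicy: &AccessPolicy{Start: &start, Expiry: &expiry, Permission: &permission},
	}}
	_, err := client.SetAccessPolicy(ctx, acl, nil, nil, nil)
	return err
}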
-func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response) (containerClientSetAccessPolicyResponse, error) { - result := containerClientSetAccessPolicyResponse{RawResponse: resp} +func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { + result := ContainerClientSetAccessPolicyResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } result.LastModified = &lastModified } @@ -1288,7 +1301,7 @@ func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetAccessPolicyResponse{}, err + return ContainerClientSetAccessPolicyResponse{}, err } result.Date = &date } @@ -1297,27 +1310,27 @@ func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. -// containerClientSetMetadataOptions - containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *containerClient) SetMetadata(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetMetadataResponse, error) { - req, err := client.setMetadataCreateRequest(ctx, containerClientSetMetadataOptions, leaseAccessConditions, modifiedAccessConditions) +// Generated from API version 2020-10-02 +// options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return containerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) } return client.setMetadataHandleResponse(resp) } // setMetadataCreateRequest creates the SetMetadata request. -func (client *containerClient) setMetadataCreateRequest(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1325,39 +1338,39 @@ func (client *containerClient) setMetadataCreateRequest(ctx context.Context, con reqQP := req.Raw().URL.Query() reqQP.Set("restype", "container") reqQP.Set("comp", "metadata") - if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetMetadataOptions.Timeout), 10)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Metadata != nil { - for k, v := range containerClientSetMetadataOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} } } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetMetadataOptions.RequestID) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = 
[]string{"application/xml"} return req, nil } // setMetadataHandleResponse handles the SetMetadata response. -func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (containerClientSetMetadataResponse, error) { - result := containerClientSetMetadataResponse{RawResponse: resp} +func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { + result := ContainerClientSetMetadataResponse{} if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } result.LastModified = &lastModified } @@ -1373,7 +1386,7 @@ func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (c if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return containerClientSetMetadataResponse{}, err + return ContainerClientSetMetadataResponse{}, err } result.Date = &date } @@ -1382,28 +1395,29 @@ func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (c // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // contentLength - The length of the request. // multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // body - Initial data -// options - containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. -func (client *containerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (containerClientSubmitBatchResponse, error) { +// options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { - return containerClientSubmitBatchResponse{}, err + return ContainerClientSubmitBatchResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return containerClientSubmitBatchResponse{}, err + return ContainerClientSubmitBatchResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return containerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) } return client.submitBatchHandleResponse(resp) } // submitBatchCreateRequest creates the SubmitBatch request. 
-func (client *containerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (*policy.Request, error) { +func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -1416,19 +1430,19 @@ func (client *containerClient) submitBatchCreateRequest(ctx context.Context, con } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("Content-Type", multipartContentType) - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") - return req, runtime.MarshalAsXML(req, body) + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/xml") } // submitBatchHandleResponse handles the SubmitBatch response. -func (client *containerClient) submitBatchHandleResponse(resp *http.Response) (containerClientSubmitBatchResponse, error) { - result := containerClientSubmitBatchResponse{RawResponse: resp} +func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) { + result := ContainerClientSubmitBatchResponse{Body: resp.Body} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go similarity index 75% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index d40d63b1b..f8df7338b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -5,11 +5,12 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( - "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "time" ) @@ -25,40 +26,77 @@ type AccessPolicy struct { Start *time.Time `xml:"Start"` } -// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. 
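// NOTE: the hand-rolled MarshalXML/UnmarshalXML methods deleted below are
// not dropped functionality; in the regenerated package the XML and time
// (de)serialization appears to live in a separate serde file
// (zz_models_serde.go in this layout), leaving the model structs here as
// plain data declarations.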
-func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias AccessPolicy - aux := &struct { - *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` - }{ - alias: (*alias)(&a), - Expiry: (*timeRFC3339)(a.Expiry), - Start: (*timeRFC3339)(a.Start), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. -func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias AccessPolicy - aux := &struct { - *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` - }{ - alias: (*alias)(a), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - a.Expiry = (*time.Time)(aux.Expiry) - a.Start = (*time.Time)(aux.Start) - return nil -} - -// AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock method. +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. 
+ LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. type AppendPositionAccessConditions struct { // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. // Append Block will succeed only if the append position is equal to this number. If @@ -77,21 +115,6 @@ type ArrowConfiguration struct { Schema []*ArrowField `xml:"Schema>Field"` } -// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. -func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias ArrowConfiguration - aux := &struct { - *alias - Schema *[]*ArrowField `xml:"Schema>Field"` - }{ - alias: (*alias)(&a), - } - if a.Schema != nil { - aux.Schema = &a.Schema - } - return e.EncodeElement(aux, start) -} - // ArrowField - Groups settings regarding specific field of an arrow schema type ArrowField struct { // REQUIRED @@ -101,127 +124,449 @@ type ArrowField struct { Scale *int32 `xml:"Scale"` } -// BlobFlatListSegment struct -type BlobFlatListSegment struct { - // REQUIRED - BlobItems []*BlobItemInternal `xml:"Blob"` +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. -func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobFlatListSegment - aux := &struct { - *alias - BlobItems *[]*BlobItemInternal `xml:"Blob"` - }{ - alias: (*alias)(&b), - } - if b.BlobItems != nil { - aux.BlobItems = &b.BlobItems - } - return e.EncodeElement(aux, start) +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease duration cannot be changed using + // renew or change. + Duration *int32 + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -type BlobHTTPHeaders struct { - // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. 
+ BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// BlobHierarchyListSegment struct -type BlobHierarchyListSegment struct { - // REQUIRED - BlobItems []*BlobItemInternal `xml:"Blob"` - BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. -func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobHierarchyListSegment - aux := &struct { - *alias - BlobItems *[]*BlobItemInternal `xml:"Blob"` - BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` - }{ - alias: (*alias)(&b), - } - if b.BlobItems != nil { - aux.BlobItems = &b.BlobItems - } - if b.BlobPrefixes != nil { - aux.BlobPrefixes = &b.BlobPrefixes - } - return e.EncodeElement(aux, start) +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } -// BlobItemInternal - An Azure Storage blob -type BlobItemInternal struct { - // REQUIRED - Deleted *bool `xml:"Deleted"` - - // REQUIRED - Name *string `xml:"Name"` - - // REQUIRED; Properties of a blob - Properties *BlobPropertiesInternal `xml:"Properties"` - - // REQUIRED - Snapshot *string `xml:"Snapshot"` +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} - // Blob tags - BlobTags *BlobTags `xml:"Tags"` - HasVersionsOnly *bool `xml:"HasVersionsOnly"` - IsCurrentVersion *bool `xml:"IsCurrentVersion"` +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} - // Dictionary of - Metadata map[string]*string `xml:"Metadata"` +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} - // Dictionary of - OrMetadata map[string]*string `xml:"OrMetadata"` - VersionID *string `xml:"VersionId"` +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. -func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias BlobItemInternal - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - return nil -} - -// BlobPrefix struct -type BlobPrefix struct { - // REQUIRED - Name *string `xml:"Name"` +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters } -// BlobPropertiesInternal - Properties of a blob -type BlobPropertiesInternal struct { - // REQUIRED - Etag *string `xml:"Etag"` +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. 
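// --- Illustrative usage sketch; not part of the generated code -----------
// The options structs in this file are plain data: every field is optional
// and nil means "send nothing". Configuring a ranged download that also
// asks the service for a per-range MD5 (valid for ranges of at most 4 MB)
// could look like this; the byte range itself is an arbitrary example.
func exampleDownloadOptions() *BlobClientDownloadOptions {
	rangeHeader := "bytes=0-1048575" // first 1 MiB, HTTP Range syntax
	withMD5 := true
	return &BlobClientDownloadOptions{
		Range:              &rangeHeader,
		RangeGetContentMD5: &withMD5,
	}
}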
+type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. 
+ // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + // Optional. An MD5 hash of the blob content. 
Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// BlobItemInternal - An Azure Storage blob +type BlobItemInternal struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobPropertiesInternal `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobPropertiesInternal - Properties of a blob +type BlobPropertiesInternal struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` // REQUIRED LastModified *time.Time `xml:"Last-Modified"` @@ -252,17 +597,17 @@ type BlobPropertiesInternal struct { DestinationSnapshot *string `xml:"DestinationSnapshot"` // The name of the encryption scope under which the blob is encrypted. - EncryptionScope *string `xml:"EncryptionScope"` - ExpiresOn *time.Time `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` - IncrementalCopy *bool `xml:"IncrementalCopy"` - IsSealed *bool `xml:"Sealed"` - LastAccessedOn *time.Time `xml:"LastAccessTime"` - LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` - LeaseState *LeaseStateType `xml:"LeaseState"` - LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` - LegalHold *bool `xml:"LegalHold"` + EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High // and Standard. @@ -272,69 +617,6 @@ type BlobPropertiesInternal struct { TagCount *int32 `xml:"TagCount"` } -// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. 
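// The generated models above serialize their time.Time fields through small
// wrapper types so encoding/xml emits RFC 1123 timestamps. A minimal sketch of
// that alias-marshalling technique; rfc1123 and sample are illustrative
// stand-ins for the generated timeRFC1123 helper and model types.
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

type rfc1123 time.Time

func (t rfc1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(time.Time(t).UTC().Format(time.RFC1123), start)
}

func (t *rfc1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var s string
	if err := d.DecodeElement(&s, &start); err != nil {
		return err
	}
	parsed, err := time.Parse(time.RFC1123, s)
	*t = rfc1123(parsed)
	return err
}

// sample keeps a plain *time.Time in its exported shape; marshalling routes
// the field through an alias struct that substitutes the wrapper type, just
// as BlobPropertiesInternal.MarshalXML below does with timeRFC1123.
type sample struct {
	LastModified *time.Time `xml:"Last-Modified"`
}

func (s sample) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type alias sample // alias drops sample's methods, avoiding recursion
	aux := &struct {
		*alias
		LastModified *rfc1123 `xml:"Last-Modified"`
	}{
		alias:        (*alias)(&s),
		LastModified: (*rfc1123)(s.LastModified),
	}
	return e.EncodeElement(aux, start)
}

func main() {
	now := time.Date(2022, time.September, 1, 12, 0, 0, 0, time.UTC)
	out, _ := xml.Marshal(sample{LastModified: &now})
	fmt.Println(string(out)) // <sample><Last-Modified>Thu, 01 Sep 2022 12:00:00 UTC</Last-Modified></sample>
}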
-func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobPropertiesInternal - aux := &struct { - *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *[]byte `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(&b), - AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), - CreationTime: (*timeRFC1123)(b.CreationTime), - DeletedTime: (*timeRFC1123)(b.DeletedTime), - ExpiresOn: (*timeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), - LastModified: (*timeRFC1123)(b.LastModified), - } - if b.ContentMD5 != nil { - aux.ContentMD5 = &b.ContentMD5 - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. -func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias BlobPropertiesInternal - aux := &struct { - *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *[]byte `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(b), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) - b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) - b.CreationTime = (*time.Time)(aux.CreationTime) - b.DeletedTime = (*time.Time)(aux.DeletedTime) - b.ExpiresOn = (*time.Time)(aux.ExpiresOn) - b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) - b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) - b.LastModified = (*time.Time)(aux.LastModified) - return nil -} - -// BlobTag struct type BlobTag struct { // REQUIRED Key *string `xml:"Key"` @@ -349,22 +631,6 @@ type BlobTags struct { BlobTagSet []*BlobTag `xml:"TagSet>Tag"` } -// MarshalXML implements the xml.Marshaller interface for type BlobTags. -func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "Tags" - type alias BlobTags - aux := &struct { - *alias - BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` - }{ - alias: (*alias)(&b), - } - if b.BlobTagSet != nil { - aux.BlobTagSet = &b.BlobTagSet - } - return e.EncodeElement(aux, start) -} - // Block - Represents a single block in a block blob. It describes the block's ID and size. type Block struct { // REQUIRED; The base64 encoded block ID. 
@@ -374,1142 +640,14 @@ type Block struct { Size *int64 `xml:"Size"` } -// BlockList struct -type BlockList struct { - CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` -} - -// MarshalXML implements the xml.Marshaller interface for type BlockList. -func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlockList - aux := &struct { - *alias - CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` - }{ - alias: (*alias)(&b), - } - if b.CommittedBlocks != nil { - aux.CommittedBlocks = &b.CommittedBlocks - } - if b.UncommittedBlocks != nil { - aux.UncommittedBlocks = &b.UncommittedBlocks - } - return e.EncodeElement(aux, start) -} - -// BlockLookupList struct -type BlockLookupList struct { - Committed []*string `xml:"Committed"` - Latest []*string `xml:"Latest"` - Uncommitted []*string `xml:"Uncommitted"` -} - -// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. -func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "BlockList" - type alias BlockLookupList - aux := &struct { - *alias - Committed *[]*string `xml:"Committed"` - Latest *[]*string `xml:"Latest"` - Uncommitted *[]*string `xml:"Uncommitted"` - }{ - alias: (*alias)(&b), - } - if b.Committed != nil { - aux.Committed = &b.Committed - } - if b.Latest != nil { - aux.Latest = &b.Latest - } - if b.Uncommitted != nil { - aux.Uncommitted = &b.Uncommitted - } - return e.EncodeElement(aux, start) -} - -// ClearRange enum -type ClearRange struct { - // REQUIRED - End *int64 `xml:"End"` - - // REQUIRED - Start *int64 `xml:"Start"` -} - -// ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. -type ContainerCpkScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - -// ContainerItem - An Azure Storage container -type ContainerItem struct { - // REQUIRED - Name *string `xml:"Name"` - - // REQUIRED; Properties of a container - Properties *ContainerProperties `xml:"Properties"` - Deleted *bool `xml:"Deleted"` - - // Dictionary of - Metadata map[string]*string `xml:"Metadata"` - Version *string `xml:"Version"` -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. -func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias ContainerItem - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - }{ - alias: (*alias)(c), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - c.Metadata = (map[string]*string)(aux.Metadata) - return nil -} - -// ContainerProperties - Properties of a container -type ContainerProperties struct { - // REQUIRED - Etag *string `xml:"Etag"` - - // REQUIRED - LastModified *time.Time `xml:"Last-Modified"` - DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` - DeletedTime *time.Time `xml:"DeletedTime"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` - - // Indicates if version level worm is enabled on this container. 
- IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` - LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` - LeaseState *LeaseStateType `xml:"LeaseState"` - LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` - PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` - PublicAccess *PublicAccessType `xml:"PublicAccess"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` -} - -// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. -func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias ContainerProperties - aux := &struct { - *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(&c), - DeletedTime: (*timeRFC1123)(c.DeletedTime), - LastModified: (*timeRFC1123)(c.LastModified), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. -func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias ContainerProperties - aux := &struct { - *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(c), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - c.DeletedTime = (*time.Time)(aux.DeletedTime) - c.LastModified = (*time.Time)(aux.LastModified) - return nil -} - -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another -// domain. Web browsers implement a security restriction known as same-origin policy that -// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin -// domain) to call APIs in another domain -type CorsRule struct { - // REQUIRED; the request headers that the origin domain may specify on the CORS request. - AllowedHeaders *string `xml:"AllowedHeaders"` - - // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) - AllowedMethods *string `xml:"AllowedMethods"` - - // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain - // is the domain from which the request originates. Note that the origin must be an exact - // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' - // to allow all origin domains to make requests via CORS. - AllowedOrigins *string `xml:"AllowedOrigins"` - - // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request - // issuer - ExposedHeaders *string `xml:"ExposedHeaders"` - - // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. - MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` -} - -// CpkInfo contains a group of parameters for the blobClient.Download method. -type CpkInfo struct { - // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. 
For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySHA256 *string -} - -// CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -type CpkScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - -// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. -type DelimitedTextConfiguration struct { - // The string used to separate columns. - ColumnSeparator *string `xml:"ColumnSeparator"` - - // The string used as an escape character. - EscapeChar *string `xml:"EscapeChar"` - - // The string used to quote a specific field. - FieldQuote *string `xml:"FieldQuote"` - - // Represents whether the data has headers. - HeadersPresent *bool `xml:"HasHeaders"` - - // The string used to separate records. - RecordSeparator *string `xml:"RecordSeparator"` -} - -// FilterBlobItem - Blob info from a Filter Blobs API call -type FilterBlobItem struct { - // REQUIRED - ContainerName *string `xml:"ContainerName"` - - // REQUIRED - Name *string `xml:"Name"` - - // Blob tags - Tags *BlobTags `xml:"Tags"` -} - -// FilterBlobSegment - The result of a Filter Blobs API call -type FilterBlobSegment struct { - // REQUIRED - Blobs []*FilterBlobItem `xml:"Blobs>Blob"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - - // REQUIRED - Where *string `xml:"Where"` - NextMarker *string `xml:"NextMarker"` -} - -// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. -func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias FilterBlobSegment - aux := &struct { - *alias - Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` - }{ - alias: (*alias)(&f), - } - if f.Blobs != nil { - aux.Blobs = &f.Blobs - } - return e.EncodeElement(aux, start) -} - -// GeoReplication - Geo-Replication information for the Secondary Storage Service -type GeoReplication struct { - // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available - // for read operations at the secondary. Primary writes after this point in time may or may - // not be available for reads. - LastSyncTime *time.Time `xml:"LastSyncTime"` - - // REQUIRED; The status of the secondary location - Status *BlobGeoReplicationStatus `xml:"Status"` -} - -// MarshalXML implements the xml.Marshaller interface for type GeoReplication. -func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias GeoReplication - aux := &struct { - *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` - }{ - alias: (*alias)(&g), - LastSyncTime: (*timeRFC1123)(g.LastSyncTime), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. 
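// The CorsRule model above flattens each rule into comma-separated strings
// rather than slices, mirroring the XML the service exchanges. A minimal
// sketch of populating one; corsRule and ptr are local stand-ins for the
// generated struct and a pointer helper, and the values are only examples.
package main

import "fmt"

func ptr[T any](v T) *T { return &v }

type corsRule struct {
	AllowedHeaders  *string
	AllowedMethods  *string
	AllowedOrigins  *string
	ExposedHeaders  *string
	MaxAgeInSeconds *int32
}

func main() {
	rule := corsRule{
		AllowedOrigins:  ptr("https://example.com"), // exact, case-sensitive origin, or "*"
		AllowedMethods:  ptr("GET,PUT"),             // comma-separated HTTP verbs
		AllowedHeaders:  ptr("x-ms-meta-*"),
		ExposedHeaders:  ptr("x-ms-request-id"),
		MaxAgeInSeconds: ptr(int32(3600)), // preflight cache lifetime in seconds
	}
	fmt.Println(*rule.AllowedOrigins, *rule.AllowedMethods, *rule.MaxAgeInSeconds)
}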
-func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias GeoReplication - aux := &struct { - *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` - }{ - alias: (*alias)(g), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - g.LastSyncTime = (*time.Time)(aux.LastSyncTime) - return nil -} - -// JSONTextConfiguration - json text configuration -type JSONTextConfiguration struct { - // The string used to separate records. - RecordSeparator *string `xml:"RecordSeparator"` -} - -// KeyInfo - Key information -type KeyInfo struct { - // REQUIRED; The date-time the key expires in ISO 8601 UTC time - Expiry *string `xml:"Expiry"` - - // REQUIRED; The date-time the key is active in ISO 8601 UTC time - Start *string `xml:"Start"` -} - -// LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -type LeaseAccessConditions struct { - // If specified, the operation only succeeds if the resource's lease is active and matches this ID. - LeaseID *string -} - -// ListBlobsFlatSegmentResponse - An enumeration of blobs -type ListBlobsFlatSegmentResponse struct { - // REQUIRED - ContainerName *string `xml:"ContainerName,attr"` - - // REQUIRED - Segment *BlobFlatListSegment `xml:"Blobs"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// ListBlobsHierarchySegmentResponse - An enumeration of blobs -type ListBlobsHierarchySegmentResponse struct { - // REQUIRED - ContainerName *string `xml:"ContainerName,attr"` - - // REQUIRED - Segment *BlobHierarchyListSegment `xml:"Blobs"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Delimiter *string `xml:"Delimiter"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// ListContainersSegmentResponse - An enumeration of containers -type ListContainersSegmentResponse struct { - // REQUIRED - ContainerItems []*ContainerItem `xml:"Containers>Container"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. -func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias ListContainersSegmentResponse - aux := &struct { - *alias - ContainerItems *[]*ContainerItem `xml:"Containers>Container"` - }{ - alias: (*alias)(&l), - } - if l.ContainerItems != nil { - aux.ContainerItems = &l.ContainerItems - } - return e.EncodeElement(aux, start) -} - -// Logging - Azure Analytics Logging settings. -type Logging struct { - // REQUIRED; Indicates whether all delete requests should be logged. - Delete *bool `xml:"Delete"` - - // REQUIRED; Indicates whether all read requests should be logged. - Read *bool `xml:"Read"` - - // REQUIRED; the retention policy which determines how long the associated data should persist - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` - - // REQUIRED; The version of Storage Analytics to configure. - Version *string `xml:"Version"` - - // REQUIRED; Indicates whether all write requests should be logged. 
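// ListBlobsFlatSegmentResponse and ListContainersSegmentResponse above follow
// the service's marker paging contract: each segment carries a NextMarker that
// is echoed back as the next request's marker until it comes back empty. A
// sketch of that loop; segment and listPage are stand-ins for the generated
// response type and for whatever client call fetches one page.
package main

import (
	"fmt"
	"strconv"
)

type segment struct {
	Items      []string
	NextMarker *string // nil or empty means the listing is complete
}

var pages = [][]string{{"a.txt", "b.txt"}, {"c.txt"}, {"d.txt", "e.txt"}}

// listPage simulates one List* round trip against an in-memory result set.
func listPage(marker *string) segment {
	i := 0
	if marker != nil {
		i, _ = strconv.Atoi(*marker)
	}
	seg := segment{Items: pages[i]}
	if i+1 < len(pages) {
		next := strconv.Itoa(i + 1)
		seg.NextMarker = &next
	}
	return seg
}

func main() {
	var marker *string
	for {
		seg := listPage(marker)
		for _, item := range seg.Items {
			fmt.Println(item)
		}
		if seg.NextMarker == nil || *seg.NextMarker == "" {
			break // NextMarker exhausted: stop paging
		}
		marker = seg.NextMarker
	}
}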
- Write *bool `xml:"Write"` -} - -// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs -type Metrics struct { - // REQUIRED; Indicates whether metrics are enabled for the Blob service. - Enabled *bool `xml:"Enabled"` - - // Indicates whether metrics should generate summary statistics for called API operations. - IncludeAPIs *bool `xml:"IncludeAPIs"` - - // the retention policy which determines how long the associated data should persist - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` - - // The version of Storage Analytics to configure. - Version *string `xml:"Version"` -} - -// ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -type ModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - IfMatch *string - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - IfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - IfNoneMatch *string - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - IfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - IfUnmodifiedSince *time.Time -} - -// PageList - the list of pages -type PageList struct { - ClearRange []*ClearRange `xml:"ClearRange"` - NextMarker *string `xml:"NextMarker"` - PageRange []*PageRange `xml:"PageRange"` -} - -// MarshalXML implements the xml.Marshaller interface for type PageList. -func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias PageList - aux := &struct { - *alias - ClearRange *[]*ClearRange `xml:"ClearRange"` - PageRange *[]*PageRange `xml:"PageRange"` - }{ - alias: (*alias)(&p), - } - if p.ClearRange != nil { - aux.ClearRange = &p.ClearRange - } - if p.PageRange != nil { - aux.PageRange = &p.PageRange - } - return e.EncodeElement(aux, start) -} - -// PageRange struct -type PageRange struct { - // REQUIRED - End *int64 `xml:"End"` - - // REQUIRED - Start *int64 `xml:"Start"` -} - -// QueryFormat struct -type QueryFormat struct { - // REQUIRED; The quick query format type. - Type *QueryFormatType `xml:"Type"` - - // Groups the settings used for formatting the response if the response should be Arrow formatted. - ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` - - // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. - DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` - - // json text configuration - JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` - - // Anything - ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` -} - -// QueryRequest - Groups the set of query request settings. -type QueryRequest struct { - // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. - Expression *string `xml:"Expression"` - - // REQUIRED; Required. The type of the provided query expression. - QueryType *string `xml:"QueryType"` - InputSerialization *QuerySerialization `xml:"InputSerialization"` - OutputSerialization *QuerySerialization `xml:"OutputSerialization"` -} - -// MarshalXML implements the xml.Marshaller interface for type QueryRequest. 
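// ModifiedAccessConditions above maps the standard HTTP precondition headers
// (If-Match, If-None-Match, If-(Un)Modified-Since, plus the tags filter) that
// give blob writes optimistic concurrency: capture the ETag on read, then
// write back only while it still matches. A minimal sketch of that pattern
// with local stand-in types; the service signals a mismatch with status 412.
package main

import (
	"errors"
	"fmt"
)

type modifiedAccessConditions struct {
	IfMatch *string
}

type resource struct{ etag, body string }

// write applies the update only when the caller's If-Match equals the
// resource's current ETag, the check the service performs server-side.
func write(r *resource, cond modifiedAccessConditions, body, newETag string) error {
	if cond.IfMatch != nil && *cond.IfMatch != r.etag {
		return errors.New("412 Precondition Failed")
	}
	r.body, r.etag = body, newETag
	return nil
}

func main() {
	r := &resource{etag: `"v1"`, body: "old"}

	seen := r.etag // read step: remember the ETag the edit was based on
	fmt.Println(write(r, modifiedAccessConditions{IfMatch: &seen}, "new", `"v2"`))   // <nil>
	fmt.Println(write(r, modifiedAccessConditions{IfMatch: &seen}, "stale", `"v3"`)) // 412 Precondition Failed
}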
-func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "QueryRequest" - type alias QueryRequest - aux := &struct { - *alias - }{ - alias: (*alias)(&q), - } - return e.EncodeElement(aux, start) -} - -//QuerySerialization struct -type QuerySerialization struct { - // REQUIRED - Format *QueryFormat `xml:"Format"` -} - -// RetentionPolicy - the retention policy which determines how long the associated data should persist -type RetentionPolicy struct { - // REQUIRED; Indicates whether a retention policy is enabled for the storage service - Enabled *bool `xml:"Enabled"` - - // Indicates whether permanent delete is allowed on this storage account. - AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` - - // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this - // value will be deleted - Days *int32 `xml:"Days"` -} - -// SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages method. -type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. - IfSequenceNumberEqualTo *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. - IfSequenceNumberLessThan *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// SignedIdentifier - signed identifier -type SignedIdentifier struct { - // REQUIRED; An Access policy - AccessPolicy *AccessPolicy `xml:"AccessPolicy"` - - // REQUIRED; a unique id - ID *string `xml:"Id"` -} - -// SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *string - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *string - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} - -// StaticWebsite - The properties that enable an account to host a static website -type StaticWebsite struct { - // REQUIRED; Indicates whether this account is hosting a static website - Enabled *bool `xml:"Enabled"` - - // Absolute path of the default index page - DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` - - // The absolute path of the custom 404 page - ErrorDocument404Path *string `xml:"ErrorDocument404Path"` - - // The default name of the index page under each directory - IndexDocument *string `xml:"IndexDocument"` -} - -// StorageServiceProperties - Storage Service Properties. -type StorageServiceProperties struct { - // The set of CORS rules. - Cors []*CorsRule `xml:"Cors>CorsRule"` - - // The default version to use for requests to the Blob service if an incoming request's version is not specified. 
Possible - // values include version 2008-10-27 and all more recent versions - DefaultServiceVersion *string `xml:"DefaultServiceVersion"` - - // the retention policy which determines how long the associated data should persist - DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - HourMetrics *Metrics `xml:"HourMetrics"` - - // Azure Analytics Logging settings. - Logging *Logging `xml:"Logging"` - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - MinuteMetrics *Metrics `xml:"MinuteMetrics"` - - // The properties that enable an account to host a static website - StaticWebsite *StaticWebsite `xml:"StaticWebsite"` -} - -// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. -func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias StorageServiceProperties - aux := &struct { - *alias - Cors *[]*CorsRule `xml:"Cors>CorsRule"` - }{ - alias: (*alias)(&s), - } - if s.Cors != nil { - aux.Cors = &s.Cors - } - return e.EncodeElement(aux, start) -} - -// StorageServiceStats - Stats for the storage service. -type StorageServiceStats struct { - // Geo-Replication information for the Secondary Storage Service - GeoReplication *GeoReplication `xml:"GeoReplication"` -} - -// UserDelegationKey - A user delegation key -type UserDelegationKey struct { - // REQUIRED; The date-time the key expires - SignedExpiry *time.Time `xml:"SignedExpiry"` - - // REQUIRED; The Azure Active Directory object ID in GUID format. - SignedOid *string `xml:"SignedOid"` - - // REQUIRED; Abbreviation of the Azure Storage service that accepts the key - SignedService *string `xml:"SignedService"` - - // REQUIRED; The date-time the key is active - SignedStart *time.Time `xml:"SignedStart"` - - // REQUIRED; The Azure Active Directory tenant ID in GUID format - SignedTid *string `xml:"SignedTid"` - - // REQUIRED; The service version that created the key - SignedVersion *string `xml:"SignedVersion"` - - // REQUIRED; The key as a base64 string - Value *string `xml:"Value"` -} - -// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. -func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias UserDelegationKey - aux := &struct { - *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` - }{ - alias: (*alias)(&u), - SignedExpiry: (*timeRFC3339)(u.SignedExpiry), - SignedStart: (*timeRFC3339)(u.SignedStart), - } - return e.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. -func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias UserDelegationKey - aux := &struct { - *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` - }{ - alias: (*alias)(u), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - u.SignedExpiry = (*time.Time)(aux.SignedExpiry) - u.SignedStart = (*time.Time)(aux.SignedStart) - return nil -} - -// appendBlobClientAppendBlockFromURLOptions contains the optional parameters for the appendBlobClient.AppendBlockFromURL -// method. -type appendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
- CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock method. -type appendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create method. -type appendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal method. 
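// The SourceContentMD5 / TransactionalContentMD5 fields above (and on most of
// the upload-shaped options in this file) carry a raw MD5 digest of the bytes
// being sent so the service can validate the body. A sketch of producing one;
// crypto/md5 is the real standard-library package, while the payload and the
// field wiring are illustrative.
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	body := []byte("block payload")

	sum := md5.Sum(body)              // [16]byte digest of the request body
	transactionalContentMD5 := sum[:] // the options fields expect a []byte slice

	fmt.Printf("%x\n", transactionalContentMD5)
}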
-type appendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL method. -type blobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease method. -type blobClientAcquireLeaseOptions struct { - // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease duration cannot be changed using - // renew or change. - Duration *int32 - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease method. -type blobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease method. 
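// The acquire-lease options above encode the service's lease rules: Duration
// is 15-60 seconds for a finite lease or -1 for one that never expires, and
// ProposedLeaseID must be a GUID string. A sketch of filling such a value;
// acquireLeaseOptions and ptr are local stand-ins, and the GUID is made up.
package main

import "fmt"

func ptr[T any](v T) *T { return &v }

type acquireLeaseOptions struct {
	Duration        *int32
	ProposedLeaseID *string
}

func main() {
	opts := acquireLeaseOptions{
		Duration:        ptr(int32(-1)), // infinite lease; finite ones are 15-60s
		ProposedLeaseID: ptr("f81d4fae-7dec-11d0-a765-00a0c91e6bf6"),
	}
	fmt.Println(*opts.Duration, *opts.ProposedLeaseID)
}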
-type blobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL method. -type blobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot method. -type blobClientCreateSnapshotOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy -// method. -type blobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. -type blobClientDeleteOptions struct { - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - BlobDeleteType *BlobDeleteType - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. -type blobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. -type blobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties method. -type blobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. -type blobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientQueryOptions contains the optional parameters for the blobClient.Query method. -type blobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease method. -type blobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease method. -type blobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. -type blobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders method. -type blobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetImmutabilityPolicyOptions contains the optional parameters for the blobClient.SetImmutabilityPolicy method. -type blobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. -type blobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata method. -type blobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. -type blobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Blob tags - Tags *BlobTags - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. -type blobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
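// Almost every options struct in this file repeats the same optional trio:
// Timeout *int32 (seconds, appended as a query parameter), RequestID *string,
// and often Snapshot / VersionID *string. The pointers make "unset" distinct
// from a zero value. A sketch of that idiom; commonOptions and ptr are local
// stand-ins rather than generated types.
package main

import "fmt"

func ptr[T any](v T) *T { return &v }

type commonOptions struct {
	Timeout   *int32
	RequestID *string
}

// render shows the nil-means-omit convention: a nil field sends nothing,
// while a set pointer carries an explicit value onto the wire.
func render(o *commonOptions) string {
	if o == nil || o.Timeout == nil {
		return "no timeout parameter sent"
	}
	return fmt.Sprintf("timeout=%d", *o.Timeout)
}

func main() {
	fmt.Println(render(nil))
	fmt.Println(render(&commonOptions{Timeout: ptr(int32(30))}))
}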
- RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL method. -type blobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. -type blobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// blockBlobClientCommitBlockListOptions contains the optional parameters for the blockBlobClient.CommitBlockList method. 
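// The block-blob options that follow (StageBlock, CommitBlockList and friends)
// revolve around base64-encoded block IDs, which must decode to equal lengths
// within one blob. A sketch of generating such IDs; the zero-padded naming
// scheme is a common convention, not something the service mandates.
package main

import (
	"encoding/base64"
	"fmt"
)

// blockID returns a base64 block ID; the fixed-width counter keeps every
// encoded ID the same length, as the service requires within a single blob.
func blockID(n int) string {
	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", n)))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(blockID(i))
	}
}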
-type blockBlobClientCommitBlockListOptions struct {
+// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method.
+type BlockBlobClientCommitBlockListOptions struct {
 // Optional. Used to set blob tags in various blob operations.
 BlobTagsString *string
 // Specifies the date time when the blobs immutability policy is set to expire.
 ImmutabilityPolicyExpiry *time.Time
 // Specifies the immutability policy mode to set on the blob.
- ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
 // Specifies whether a legal hold should be set on the blob.
 LegalHold *bool
 // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
@@ -1533,8 +671,8 @@ type blockBlobClientCommitBlockListOptions struct {
 TransactionalContentMD5 []byte
 }
 
-// blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList method.
-type blockBlobClientGetBlockListOptions struct {
+// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
+type BlockBlobClientGetBlockListOptions struct {
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 // analytics logging is enabled.
 RequestID *string
@@ -1547,8 +685,8 @@ type blockBlobClientGetBlockListOptions struct {
 Timeout *int32
 }
 
-// blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL method.
-type blockBlobClientPutBlobFromURLOptions struct {
+// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method.
+type BlockBlobClientPutBlobFromURLOptions struct {
 // Optional. Used to set blob tags in various blob operations.
 BlobTagsString *string
 // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
@@ -1576,8 +714,8 @@ type blockBlobClientPutBlobFromURLOptions struct {
 TransactionalContentMD5 []byte
 }
 
-// blockBlobClientStageBlockFromURLOptions contains the optional parameters for the blockBlobClient.StageBlockFromURL method.
-type blockBlobClientStageBlockFromURLOptions struct {
+// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method.
+type BlockBlobClientStageBlockFromURLOptions struct {
 // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
 CopySourceAuthorization *string
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
@@ -1594,8 +732,8 @@ type blockBlobClientStageBlockFromURLOptions struct {
 Timeout *int32
 }
 
-// blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock method.
-type blockBlobClientStageBlockOptions struct {
+// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
+type BlockBlobClientStageBlockOptions struct {
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 // analytics logging is enabled.
 RequestID *string
@@ -1608,14 +746,14 @@ type blockBlobClientStageBlockOptions struct {
 TransactionalContentMD5 []byte
 }
 
-// blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload method.
-type blockBlobClientUploadOptions struct {
+// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
+type BlockBlobClientUploadOptions struct {
 // Optional. Used to set blob tags in various blob operations.
 BlobTagsString *string
 // Specifies the date time when the blobs immutability policy is set to expire.
 ImmutabilityPolicyExpiry *time.Time
 // Specifies the immutability policy mode to set on the blob.
- ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
 // Specifies whether a legal hold should be set on the blob.
 LegalHold *bool
 // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
@@ -1637,8 +775,27 @@ type blockBlobClientUploadOptions struct {
 TransactionalContentMD5 []byte
 }
 
-// containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease method.
-type containerClientAcquireLeaseOptions struct {
+type BlockList struct {
+ CommittedBlocks []*Block `xml:"CommittedBlocks>Block"`
+ UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"`
+}
+
+type BlockLookupList struct {
+ Committed []*string `xml:"Committed"`
+ Latest []*string `xml:"Latest"`
+ Uncommitted []*string `xml:"Uncommitted"`
+}
+
+type ClearRange struct {
+ // REQUIRED
+ End *int64 `xml:"End"`
+
+ // REQUIRED
+ Start *int64 `xml:"Start"`
+}
+
+// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
+type ContainerClientAcquireLeaseOptions struct {
 // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
 // can be between 15 and 60 seconds. A lease duration cannot be changed using
 // renew or change.
@@ -1655,8 +812,8 @@ type containerClientAcquireLeaseOptions struct {
 Timeout *int32
 }
 
-// containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease method.
-type containerClientBreakLeaseOptions struct {
+// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
+type ContainerClientBreakLeaseOptions struct {
 // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
 // break period is only used if it is shorter than the time remaining on the
 // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
@@ -1672,8 +829,8 @@ type containerClientBreakLeaseOptions struct {
 Timeout *int32
 }
 
-// containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease method.
-type containerClientChangeLeaseOptions struct {
+// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method.
+type ContainerClientChangeLeaseOptions struct {
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 // analytics logging is enabled.
 RequestID *string
@@ -1682,8 +839,8 @@ type containerClientChangeLeaseOptions struct {
 Timeout *int32
 }
 
-// containerClientCreateOptions contains the optional parameters for the containerClient.Create method.
-type containerClientCreateOptions struct {
+// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
+type ContainerClientCreateOptions struct {
 // Specifies whether data in the container may be accessed publicly and the level of access
 Access *PublicAccessType
 // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
@@ -1701,8 +858,119 @@ type containerClientCreateOptions struct {
 Timeout *int32
 }
 
-// containerClientDeleteOptions contains the optional parameters for the containerClient.Delete method.
-type containerClientDeleteOptions struct {
+// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
+type ContainerClientDeleteOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method.
+type ContainerClientGetAccessPolicyOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method.
+type ContainerClientGetAccountInfoOptions struct {
+ // placeholder for future optional parameters
+}
+
+// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
+type ContainerClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment
+// method.
+type ContainerClientListBlobFlatSegmentOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only blobs whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment
+// method.
+type ContainerClientListBlobHierarchySegmentOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only blobs whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
+type ContainerClientReleaseLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1711,8 +979,12 @@ type containerClientDeleteOptions struct { Timeout *int32 } -// containerClientGetAccessPolicyOptions contains the optional parameters for the containerClient.GetAccessPolicy method. -type containerClientGetAccessPolicyOptions struct { +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1721,13 +993,10 @@ type containerClientGetAccessPolicyOptions struct { Timeout *int32 } -// containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo method. -type containerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties method. -type containerClientGetPropertiesOptions struct { +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1736,25 +1005,15 @@ type containerClientGetPropertiesOptions struct { Timeout *int32 } -// containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment -// method. -type containerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. 
- Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -1763,25 +1022,8 @@ type containerClientListBlobFlatSegmentOptions struct { Timeout *int32 } -// containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment -// method. -type containerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. 
- Maxresults *int32
- // Filters the results to return only containers whose name begins with the specified prefix.
- Prefix *string
+// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
+type ContainerClientSubmitBatchOptions struct {
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 // analytics logging is enabled.
 RequestID *string
@@ -1790,95 +1032,268 @@ type containerClientListBlobHierarchySegmentOptions struct {
 Timeout *int32
 }
 
-// containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease method.
-type containerClientReleaseLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
- // analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
- Timeout *int32
-}
+// ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method.
+type ContainerCpkScopeInfo struct {
+ // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
+ // future writes.
+ DefaultEncryptionScope *string
+ // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
+ // the scope set on the container.
+ PreventEncryptionScopeOverride *bool
+}
+
+// ContainerItem - An Azure Storage container
+type ContainerItem struct {
+ // REQUIRED
+ Name *string `xml:"Name"`
+
+ // REQUIRED; Properties of a container
+ Properties *ContainerProperties `xml:"Properties"`
+ Deleted *bool `xml:"Deleted"`
+
+ // Dictionary of metadata key-value pairs
+ Metadata map[string]*string `xml:"Metadata"`
+ Version *string `xml:"Version"`
+}
+
+// ContainerProperties - Properties of a container
+type ContainerProperties struct {
+ // REQUIRED
+ ETag *azcore.ETag `xml:"Etag"`
+
+ // REQUIRED
+ LastModified *time.Time `xml:"Last-Modified"`
+ DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"`
+ DeletedTime *time.Time `xml:"DeletedTime"`
+ HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
+ HasLegalHold *bool `xml:"HasLegalHold"`
+
+ // Indicates if version-level WORM (write once, read many) is enabled on this container.
+ IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"`
+ LeaseDuration *LeaseDurationType `xml:"LeaseDuration"`
+ LeaseState *LeaseStateType `xml:"LeaseState"`
+ LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
+ PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"`
+ PublicAccess *PublicAccessType `xml:"PublicAccess"`
+ RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
+}
+
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another
+// domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin
+// domain) to call APIs in another domain
+type CorsRule struct {
+ // REQUIRED; the request headers that the origin domain may specify on the CORS request.
+ AllowedHeaders *string `xml:"AllowedHeaders"`
+
+ // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
+ AllowedMethods *string `xml:"AllowedMethods"`
+
+ // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain
+ // is the domain from which the request originates. Note that the origin must be an exact
+ // case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*'
+ // to allow all origin domains to make requests via CORS.
+ AllowedOrigins *string `xml:"AllowedOrigins"`
+
+ // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request
+ // issuer
+ ExposedHeaders *string `xml:"ExposedHeaders"`
+
+ // REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request.
+ MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
+}
+
+// CpkInfo contains a group of parameters for the BlobClient.Download method.
+type CpkInfo struct {
+ // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+ // if the x-ms-encryption-key header is provided.
+ EncryptionAlgorithm *EncryptionAlgorithmType
+ // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
+ // is performed with the root account encryption key. For more information, see
+ // Encryption at Rest for Azure Storage Services.
+ EncryptionKey *string
+ // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+ EncryptionKeySHA256 *string
+}
+
+// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+type CpkScopeInfo struct {
+ // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
+ // in the request. If not specified, encryption is performed with the default
+ // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
+ EncryptionScope *string
+}
+
+// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
+type DelimitedTextConfiguration struct {
+ // The string used to separate columns.
+ ColumnSeparator *string `xml:"ColumnSeparator"`
+
+ // The string used as an escape character.
+ EscapeChar *string `xml:"EscapeChar"`
+
+ // The string used to quote a specific field.
+ FieldQuote *string `xml:"FieldQuote"`
+
+ // Represents whether the data has headers.
+ HeadersPresent *bool `xml:"HasHeaders"`
+
+ // The string used to separate records.
+ RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` -// containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. 
-type containerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` -// containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease method. -type containerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` -// containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. -type containerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. - DeletedContainerName *string - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` -// containerClientSetAccessPolicyOptions contains the optional parameters for the containerClient.SetAccessPolicy method. -type containerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // the acls for the container - ContainerACL []*SignedIdentifier - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` } -// containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata method. 
-type containerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` } -// containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. -type containerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time } -// pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages method. -type pageBlobClientClearPagesOptions struct { +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { // Return only the bytes of the blob in the specified range. 
Range *string
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
@@ -1889,8 +1304,8 @@ type pageBlobClientClearPagesOptions struct {
 Timeout *int32
 }
 
-// pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental method.
-type pageBlobClientCopyIncrementalOptions struct {
+// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method.
+type PageBlobClientCopyIncrementalOptions struct {
 // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 // analytics logging is enabled.
 RequestID *string
@@ -1899,8 +1314,8 @@ type pageBlobClientCopyIncrementalOptions struct {
 Timeout *int32
 }
 
-// pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create method.
-type pageBlobClientCreateOptions struct {
+// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method.
+type PageBlobClientCreateOptions struct {
 // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
 // the sequence number must be between 0 and 2^63 - 1.
 BlobSequenceNumber *int64
@@ -1909,7 +1324,7 @@ type pageBlobClientCreateOptions struct {
 // Specifies the date time when the blobs immutability policy is set to expire.
 ImmutabilityPolicyExpiry *time.Time
 // Specifies the immutability policy mode to set on the blob.
- ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
 // Specifies whether a legal hold should be set on the blob.
 LegalHold *bool
 // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
@@ -1929,8 +1344,8 @@ type pageBlobClientCreateOptions struct {
 Timeout *int32
 }
 
-// pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the pageBlobClient.GetPageRangesDiff method.
-type pageBlobClientGetPageRangesDiffOptions struct {
+// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff method.
+type PageBlobClientGetPageRangesDiffOptions struct {
 // A string value that identifies the portion of the list of page ranges to be returned with the next listing operation. The
 // operation returns the NextMarker value within the response body if the listing
 // operation did not return all page ranges remaining to be listed with the current page. The NextMarker value can be used
 // as the value for the marker parameter in a subsequent call to request the next
 // page of list items. The marker value is opaque to the client.
@@ -1967,8 +1382,8 @@ type pageBlobClientGetPageRangesDiffOptions struct {
 Timeout *int32
 }
 
-// pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges method.
-type pageBlobClientGetPageRangesOptions struct {
+// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method.
+type PageBlobClientGetPageRangesOptions struct {
 // A string value that identifies the portion of the list of page ranges to be returned with the next listing operation. The
 // operation returns the NextMarker value within the response body if the listing
 // operation did not return all page ranges remaining to be listed with the current page.
The NextMarker value can be used @@ -1995,8 +1410,8 @@ type pageBlobClientGetPageRangesOptions struct { Timeout *int32 } -// pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize method. -type pageBlobClientResizeOptions struct { +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2005,9 +1420,9 @@ type pageBlobClientResizeOptions struct { Timeout *int32 } -// pageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the pageBlobClient.UpdateSequenceNumber +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber // method. -type pageBlobClientUpdateSequenceNumberOptions struct { +type PageBlobClientUpdateSequenceNumberOptions struct { // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of // the sequence number must be between 0 and 2^63 - 1. BlobSequenceNumber *int64 @@ -2019,8 +1434,8 @@ type pageBlobClientUpdateSequenceNumberOptions struct { Timeout *int32 } -// pageBlobClientUploadPagesFromURLOptions contains the optional parameters for the pageBlobClient.UploadPagesFromURL method. -type pageBlobClientUploadPagesFromURLOptions struct { +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -2035,8 +1450,8 @@ type pageBlobClientUploadPagesFromURLOptions struct { Timeout *int32 } -// pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages method. -type pageBlobClientUploadPagesOptions struct { +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { // Return only the bytes of the blob in the specified range. Range *string // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -2051,8 +1466,80 @@ type pageBlobClientUploadPagesOptions struct { TransactionalContentMD5 []byte } -// serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. -type serviceClientFilterBlobsOptions struct { +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. 
+ DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"`
+
+ // json text configuration
+ JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"`
+
+ // parquet configuration
+ ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"`
+}
+
+// QueryRequest - Groups the set of query request settings.
+type QueryRequest struct {
+ // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB.
+ Expression *string `xml:"Expression"`
+
+ // CONSTANT; Required. The type of the provided query expression.
+ // Field has constant value "SQL", any specified value is ignored.
+ QueryType *string `xml:"QueryType"`
+ InputSerialization *QuerySerialization `xml:"InputSerialization"`
+ OutputSerialization *QuerySerialization `xml:"OutputSerialization"`
+}
+
+type QuerySerialization struct {
+ // REQUIRED
+ Format *QueryFormat `xml:"Format"`
+}
+
+// RetentionPolicy - the retention policy which determines how long the associated data should persist
+type RetentionPolicy struct {
+ // REQUIRED; Indicates whether a retention policy is enabled for the storage service
+ Enabled *bool `xml:"Enabled"`
+
+ // Indicates whether permanent delete is allowed on this storage account.
+ AllowPermanentDelete *bool `xml:"AllowPermanentDelete"`
+
+ // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this
+ // value will be deleted
+ Days *int32 `xml:"Days"`
+}
+
+// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method.
+type SequenceNumberAccessConditions struct {
+ // Specify this header value to operate only on a blob if it has the specified sequence number.
+ IfSequenceNumberEqualTo *int64
+ // Specify this header value to operate only on a blob if it has a sequence number less than the specified value.
+ IfSequenceNumberLessThan *int64
+ // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified value.
+ IfSequenceNumberLessThanOrEqualTo *int64
+}
+
+// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
+type ServiceClientFilterBlobsOptions struct {
 // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
 // operation returns the NextMarker value within the response body if the listing
 // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
@@ -2075,13 +1562,13 @@ type serviceClientFilterBlobsOptions struct {
 Where *string
 }
 
-// serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method.
-type serviceClientGetAccountInfoOptions struct {
+// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
+type ServiceClientGetAccountInfoOptions struct {
 // placeholder for future optional parameters
 }
 
-// serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method.
-type serviceClientGetPropertiesOptions struct {
+// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
+type ServiceClientGetPropertiesOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2090,8 +1577,8 @@ type serviceClientGetPropertiesOptions struct { Timeout *int32 } -// serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. -type serviceClientGetStatisticsOptions struct { +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2100,8 +1587,8 @@ type serviceClientGetStatisticsOptions struct { Timeout *int32 } -// serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey method. -type serviceClientGetUserDelegationKeyOptions struct { +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2110,9 +1597,9 @@ type serviceClientGetUserDelegationKeyOptions struct { Timeout *int32 } -// serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment // method. -type serviceClientListContainersSegmentOptions struct { +type ServiceClientListContainersSegmentOptions struct { // Include this parameter to specify that the container's metadata be returned as part of the response body. Include []ListContainersIncludeType // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The @@ -2137,8 +1624,8 @@ type serviceClientListContainersSegmentOptions struct { Timeout *int32 } -// serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. -type serviceClientSetPropertiesOptions struct { +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. RequestID *string @@ -2147,8 +1634,8 @@ type serviceClientSetPropertiesOptions struct { Timeout *int32 } -// serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. -type serviceClientSubmitBatchOptions struct { +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. 
RequestID *string @@ -2156,3 +1643,100 @@ type serviceClientSubmitBatchOptions struct { // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 } + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +type StorageError struct { + Message *string `json:"Message,omitempty"` +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + Cors []*CorsRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. + Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. 
+ SignedOID *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTID *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go new file mode 100644 index 000000000..770e41a08 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -0,0 +1,481 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. +func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. 
+func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias BlobHierarchyListSegment
+	aux := &struct {
+		*alias
+		BlobItems    *[]*BlobItemInternal `xml:"Blob"`
+		BlobPrefixes *[]*BlobPrefix       `xml:"BlobPrefix"`
+	}{
+		alias: (*alias)(&b),
+	}
+	if b.BlobItems != nil {
+		aux.BlobItems = &b.BlobItems
+	}
+	if b.BlobPrefixes != nil {
+		aux.BlobPrefixes = &b.BlobPrefixes
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal.
+func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias BlobItemInternal
+	aux := &struct {
+		*alias
+		Metadata   additionalProperties `xml:"Metadata"`
+		OrMetadata additionalProperties `xml:"OrMetadata"`
+	}{
+		alias: (*alias)(b),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	b.Metadata = (map[string]*string)(aux.Metadata)
+	b.OrMetadata = (map[string]*string)(aux.OrMetadata)
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal.
+func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias BlobPropertiesInternal
+	aux := &struct {
+		*alias
+		AccessTierChangeTime        *timeRFC1123 `xml:"AccessTierChangeTime"`
+		ContentMD5                  *string      `xml:"Content-MD5"`
+		CopyCompletionTime          *timeRFC1123 `xml:"CopyCompletionTime"`
+		CreationTime                *timeRFC1123 `xml:"Creation-Time"`
+		DeletedTime                 *timeRFC1123 `xml:"DeletedTime"`
+		ExpiresOn                   *timeRFC1123 `xml:"Expiry-Time"`
+		ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+		LastAccessedOn              *timeRFC1123 `xml:"LastAccessTime"`
+		LastModified                *timeRFC1123 `xml:"Last-Modified"`
+	}{
+		alias:                       (*alias)(&b),
+		AccessTierChangeTime:        (*timeRFC1123)(b.AccessTierChangeTime),
+		CopyCompletionTime:          (*timeRFC1123)(b.CopyCompletionTime),
+		CreationTime:                (*timeRFC1123)(b.CreationTime),
+		DeletedTime:                 (*timeRFC1123)(b.DeletedTime),
+		ExpiresOn:                   (*timeRFC1123)(b.ExpiresOn),
+		ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn),
+		LastAccessedOn:              (*timeRFC1123)(b.LastAccessedOn),
+		LastModified:                (*timeRFC1123)(b.LastModified),
+	}
+	if b.ContentMD5 != nil {
+		encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat)
+		aux.ContentMD5 = &encodedContentMD5
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal.
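+//
+// Content-MD5 travels on the wire as base64 text, so the marshaller above
+// converts it with runtime.EncodeByteArray rather than leaving the raw []byte
+// to encoding/xml; the decode direction mirrors it. A minimal sketch, assuming
+// a base64 string `s` (hypothetical):
+//
+//	var md5Sum []byte
+//	if err := runtime.DecodeByteArray(s, &md5Sum, runtime.Base64StdFormat); err != nil {
+//		// a malformed value surfaces here, before any field is assigned
+//	}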
+func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias BlobPropertiesInternal
+	aux := &struct {
+		*alias
+		AccessTierChangeTime        *timeRFC1123 `xml:"AccessTierChangeTime"`
+		ContentMD5                  *string      `xml:"Content-MD5"`
+		CopyCompletionTime          *timeRFC1123 `xml:"CopyCompletionTime"`
+		CreationTime                *timeRFC1123 `xml:"Creation-Time"`
+		DeletedTime                 *timeRFC1123 `xml:"DeletedTime"`
+		ExpiresOn                   *timeRFC1123 `xml:"Expiry-Time"`
+		ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+		LastAccessedOn              *timeRFC1123 `xml:"LastAccessTime"`
+		LastModified                *timeRFC1123 `xml:"Last-Modified"`
+	}{
+		alias: (*alias)(b),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime)
+	if aux.ContentMD5 != nil {
+		if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil {
+			return err
+		}
+	}
+	b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime)
+	b.CreationTime = (*time.Time)(aux.CreationTime)
+	b.DeletedTime = (*time.Time)(aux.DeletedTime)
+	b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
+	b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
+	b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
+	b.LastModified = (*time.Time)(aux.LastModified)
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaller interface for type BlobTags.
+func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Name.Local = "Tags"
+	type alias BlobTags
+	aux := &struct {
+		*alias
+		BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"`
+	}{
+		alias: (*alias)(&b),
+	}
+	if b.BlobTagSet != nil {
+		aux.BlobTagSet = &b.BlobTagSet
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalXML implements the xml.Marshaller interface for type BlockList.
+func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias BlockList
+	aux := &struct {
+		*alias
+		CommittedBlocks   *[]*Block `xml:"CommittedBlocks>Block"`
+		UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"`
+	}{
+		alias: (*alias)(&b),
+	}
+	if b.CommittedBlocks != nil {
+		aux.CommittedBlocks = &b.CommittedBlocks
+	}
+	if b.UncommittedBlocks != nil {
+		aux.UncommittedBlocks = &b.UncommittedBlocks
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalXML implements the xml.Marshaller interface for type BlockLookupList.
+func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Name.Local = "BlockList"
+	type alias BlockLookupList
+	aux := &struct {
+		*alias
+		Committed   *[]*string `xml:"Committed"`
+		Latest      *[]*string `xml:"Latest"`
+		Uncommitted *[]*string `xml:"Uncommitted"`
+	}{
+		alias: (*alias)(&b),
+	}
+	if b.Committed != nil {
+		aux.Committed = &b.Committed
+	}
+	if b.Latest != nil {
+		aux.Latest = &b.Latest
+	}
+	if b.Uncommitted != nil {
+		aux.Uncommitted = &b.Uncommitted
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem.
+func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias ContainerItem
+	aux := &struct {
+		*alias
+		Metadata additionalProperties `xml:"Metadata"`
+	}{
+		alias: (*alias)(c),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	c.Metadata = (map[string]*string)(aux.Metadata)
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaller interface for type ContainerProperties.
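+//
+// The list marshallers above take a pointer to the slice (e.g. *[]*Block) and
+// set it only when the source slice is non-nil, so an empty list is omitted
+// entirely rather than serialized as an empty parent element. The `Parent>Child`
+// tag syntax nests each entry under a wrapper element; illustrative shape only:
+//
+//	<CommittedBlocks><Block>...</Block></CommittedBlocks>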
+func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias ContainerProperties
+	aux := &struct {
+		*alias
+		DeletedTime  *timeRFC1123 `xml:"DeletedTime"`
+		LastModified *timeRFC1123 `xml:"Last-Modified"`
+	}{
+		alias:        (*alias)(&c),
+		DeletedTime:  (*timeRFC1123)(c.DeletedTime),
+		LastModified: (*timeRFC1123)(c.LastModified),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties.
+func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias ContainerProperties
+	aux := &struct {
+		*alias
+		DeletedTime  *timeRFC1123 `xml:"DeletedTime"`
+		LastModified *timeRFC1123 `xml:"Last-Modified"`
+	}{
+		alias: (*alias)(c),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	c.DeletedTime = (*time.Time)(aux.DeletedTime)
+	c.LastModified = (*time.Time)(aux.LastModified)
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment.
+func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias FilterBlobSegment
+	aux := &struct {
+		*alias
+		Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"`
+	}{
+		alias: (*alias)(&f),
+	}
+	if f.Blobs != nil {
+		aux.Blobs = &f.Blobs
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalXML implements the xml.Marshaller interface for type GeoReplication.
+func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias GeoReplication
+	aux := &struct {
+		*alias
+		LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
+	}{
+		alias:        (*alias)(&g),
+		LastSyncTime: (*timeRFC1123)(g.LastSyncTime),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication.
+func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias GeoReplication
+	aux := &struct {
+		*alias
+		LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
+	}{
+		alias: (*alias)(g),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	g.LastSyncTime = (*time.Time)(aux.LastSyncTime)
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse.
+func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias ListContainersSegmentResponse
+	aux := &struct {
+		*alias
+		ContainerItems *[]*ContainerItem `xml:"Containers>Container"`
+	}{
+		alias: (*alias)(&l),
+	}
+	if l.ContainerItems != nil {
+		aux.ContainerItems = &l.ContainerItems
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalXML implements the xml.Marshaller interface for type PageList.
+func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias PageList
+	aux := &struct {
+		*alias
+		ClearRange *[]*ClearRange `xml:"ClearRange"`
+		PageRange  *[]*PageRange  `xml:"PageRange"`
+	}{
+		alias: (*alias)(&p),
+	}
+	if p.ClearRange != nil {
+		aux.ClearRange = &p.ClearRange
+	}
+	if p.PageRange != nil {
+		aux.PageRange = &p.PageRange
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalXML implements the xml.Marshaller interface for type QueryRequest.
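+//
+// QueryRequest, like BlobTags and BlockLookupList above, rewrites
+// start.Name.Local so the wire element name is fixed regardless of the name
+// the encoder passes in. A minimal sketch, assuming a QueryRequest value `q`
+// (hypothetical):
+//
+//	buf, _ := xml.Marshal(q) // root element is always <QueryRequest>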
+func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Name.Local = "QueryRequest"
+	type alias QueryRequest
+	aux := &struct {
+		*alias
+	}{
+		alias: (*alias)(&q),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type StorageError.
+func (s StorageError) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "Message", s.Message)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError.
+func (s *StorageError) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", s, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "Message":
+			err = unpopulate(val, "Message", &s.Message)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", s, err)
+		}
+	}
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties.
+func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias StorageServiceProperties
+	aux := &struct {
+		*alias
+		Cors *[]*CorsRule `xml:"Cors>CorsRule"`
+	}{
+		alias: (*alias)(&s),
+	}
+	if s.Cors != nil {
+		aux.Cors = &s.Cors
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey.
+func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias UserDelegationKey
+	aux := &struct {
+		*alias
+		SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
+		SignedStart  *timeRFC3339 `xml:"SignedStart"`
+	}{
+		alias:        (*alias)(&u),
+		SignedExpiry: (*timeRFC3339)(u.SignedExpiry),
+		SignedStart:  (*timeRFC3339)(u.SignedStart),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey.
+func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias UserDelegationKey
+	aux := &struct {
+		*alias
+		SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
+		SignedStart  *timeRFC3339 `xml:"SignedStart"`
+	}{
+		alias: (*alias)(u),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
+	u.SignedStart = (*time.Time)(aux.SignedStart)
+	return nil
+}
+
+func populate(m map[string]interface{}, k string, v interface{}) {
+	if v == nil {
+		return
+	} else if azcore.IsNullValue(v) {
+		m[k] = nil
+	} else if !reflect.ValueOf(v).IsNil() {
+		m[k] = v
+	}
+}
+
+func unpopulate(data json.RawMessage, fn string, v interface{}) error {
+	if data == nil {
+		return nil
+	}
+	if err := json.Unmarshal(data, v); err != nil {
+		return fmt.Errorf("struct field %s: %v", fn, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
new file mode 100644
index 000000000..2747e4081
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
@@ -0,0 +1,1287 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+// DO NOT EDIT.
+
+package generated
+
+import (
+	"context"
+	"encoding/base64"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"io"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// PageBlobClient contains the methods for the PageBlob group.
+// Don't use this type directly, use NewPageBlobClient() instead.
+type PageBlobClient struct {
+	endpoint string
+	pl       runtime.Pipeline
+}
+
+// NewPageBlobClient creates a new instance of PageBlobClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient {
+	client := &PageBlobClient{
+		endpoint: endpoint,
+		pl:       pl,
+	}
+	return client
+}
+
+// ClearPages - The Clear Pages operation clears a set of pages from a page blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// contentLength - The length of the request.
+// options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) {
+	req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
+	if err != nil {
+		return PageBlobClientClearPagesResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return PageBlobClientClearPagesResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusCreated) {
+		return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.clearPagesHandleResponse(resp)
+}
+
+// clearPagesCreateRequest creates the ClearPages request.
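+//
+// A minimal call sketch (illustrative; the pipeline `pl` and the range are
+// hypothetical, and Clear Pages itself sends no body, hence contentLength 0):
+//
+//	client := NewPageBlobClient("https://myaccount.blob.core.windows.net/c/b", pl)
+//	rng := "bytes=0-511" // page ranges must be 512-byte aligned
+//	resp, err := client.ClearPages(ctx, 0, &PageBlobClientClearPagesOptions{Range: &rng},
+//		nil, nil, nil, nil, nil)
+//	// resp carries parsed headers such as BlobSequenceNumber and LastModified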
+func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "page")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-page-write"] = []string{"clear"}
+	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
+	if options != nil && options.Range != nil {
+		req.Raw().Header["x-ms-range"] = []string{*options.Range}
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+	}
+	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
+		req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)}
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
+		req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)}
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
+		req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// clearPagesHandleResponse handles the ClearPages response.
+func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) {
+	result := PageBlobClientClearPagesResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.XMSContentCRC64 = xMSContentCRC64
+	}
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.BlobSequenceNumber = &blobSequenceNumber
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
+// The snapshot is copied such that only the differential changes between the previously copied
+// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can
+// be read or copied from as usual. This API is supported since REST version
+// 2016-05-31.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
+// a page blob snapshot. The value should be URL-encoded as it would appear in a request
+// URI. The source blob must either be public or must be authenticated via a shared access signature.
+// options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
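+//
+// A minimal call sketch (illustrative; the source URL is hypothetical and must
+// name a readable page blob snapshot):
+//
+//	src := "https://src.blob.core.windows.net/c/b?snapshot=..."
+//	resp, err := client.CopyIncremental(ctx, src, nil, nil)
+//	// the copy is asynchronous: resp.CopyID identifies it and resp.CopyStatus
+//	// starts out "pending"; poll the destination's properties for completion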
+func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) {
+	req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions)
+	if err != nil {
+		return PageBlobClientCopyIncrementalResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return PageBlobClientCopyIncrementalResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
+		return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.copyIncrementalHandleResponse(resp)
+}
+
+// copyIncrementalCreateRequest creates the CopyIncremental request.
+func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "incrementalcopy")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// copyIncrementalHandleResponse handles the CopyIncremental response.
+func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) {
+	result := PageBlobClientCopyIncrementalResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCopyIncrementalResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCopyIncrementalResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	return result, nil
+}
+
+// Create - The Create operation creates a new page blob.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// contentLength - The length of the request.
+// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
+// to a 512-byte boundary.
+// options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) {
+	req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+	if err != nil {
+		return PageBlobClientCreateResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return PageBlobClientCreateResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusCreated) {
+		return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.createHandleResponse(resp)
+}
+
+// createCreateRequest creates the Create request.
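+//
+// A minimal call sketch (illustrative). Create sends no body, so contentLength
+// is 0; blobContentLength fixes the blob's capacity and must be a multiple of
+// 512 (here 4096, a hypothetical value):
+//
+//	resp, err := client.Create(ctx, 0, 4096, nil, nil, nil, nil, nil, nil)
+//	// the blob now exposes 4096 zeroed bytes; fill it with UploadPages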
+func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"}
+	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
+	if options != nil && options.Tier != nil {
+		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
+		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
+		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
+		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+	}
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			req.Raw().Header["x-ms-meta-"+k] = []string{v}
+		}
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+	}
+	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
+	if options != nil && options.BlobSequenceNumber != nil {
+		req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	if options != nil && options.BlobTagsString != nil {
+		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+	}
+	if options != nil && options.ImmutabilityPolicyExpiry != nil {
+		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
+	}
+	if options != nil && options.ImmutabilityPolicyMode != nil {
+		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+	}
+	if options != nil && options.LegalHold != nil {
+		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// createHandleResponse handles the Create response.
+func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) {
+	result := PageBlobClientCreateResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	return result, nil
+}
+
+// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
+// of a page blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] {
+	return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{
+		More: func(page PageBlobClientGetPageRangesResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
+			} else {
+				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
+			}
+			if err != nil {
+				return PageBlobClientGetPageRangesResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return PageBlobClientGetPageRangesResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.GetPageRangesHandleResponse(resp)
+		},
+	})
+}
+
+// GetPageRangesCreateRequest creates the GetPageRanges request.
+func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "pagelist")
+	if options != nil && options.Snapshot != nil {
+		reqQP.Set("snapshot", *options.Snapshot)
+	}
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	if options != nil && options.Marker != nil {
+		reqQP.Set("marker", *options.Marker)
+	}
+	if options != nil && options.Maxresults != nil {
+		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if options != nil && options.Range != nil {
+		req.Raw().Header["x-ms-range"] = []string{*options.Range}
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// GetPageRangesHandleResponse handles the GetPageRanges response.
+func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) {
+	result := PageBlobClientGetPageRangesResponse{}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientGetPageRangesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
+		blobContentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return PageBlobClientGetPageRangesResponse{}, err
+		}
+		result.BlobContentLength = &blobContentLength
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientGetPageRangesResponse{}, err
+		}
+		result.Date = &date
+	}
+	if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
+		return PageBlobClientGetPageRangesResponse{}, err
+	}
+	return result, nil
+}
+
+// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
+// were changed between target blob and previous snapshot.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
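+//
+// Both pagers in this file follow the azcore generics-based paging model:
+// More reports whether NextMarker is non-empty, and Fetcher issues either the
+// initial request or a follow-up GET against the marker URL. A minimal
+// iteration sketch (illustrative):
+//
+//	pager := client.NewGetPageRangesDiffPager(nil, nil, nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			break // an *azcore.ResponseError carries the service error code
+//		}
+//		_ = page.PageList // ranges that changed since the previous snapshot
+//	}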
+func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] {
+	return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{
+		More: func(page PageBlobClientGetPageRangesDiffResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
+			} else {
+				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
+			}
+			if err != nil {
+				return PageBlobClientGetPageRangesDiffResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return PageBlobClientGetPageRangesDiffResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.GetPageRangesDiffHandleResponse(resp)
+		},
+	})
+}
+
+// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request.
+func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "pagelist")
+	if options != nil && options.Snapshot != nil {
+		reqQP.Set("snapshot", *options.Snapshot)
+	}
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	if options != nil && options.Prevsnapshot != nil {
+		reqQP.Set("prevsnapshot", *options.Prevsnapshot)
+	}
+	if options != nil && options.Marker != nil {
+		reqQP.Set("marker", *options.Marker)
+	}
+	if options != nil && options.Maxresults != nil {
+		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if options != nil && options.PrevSnapshotURL != nil {
+		req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL}
+	}
+	if options != nil && options.Range != nil {
+		req.Raw().Header["x-ms-range"] = []string{*options.Range}
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response.
+func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) {
+	result := PageBlobClientGetPageRangesDiffResponse{}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientGetPageRangesDiffResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
+		blobContentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return PageBlobClientGetPageRangesDiffResponse{}, err
+		}
+		result.BlobContentLength = &blobContentLength
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientGetPageRangesDiffResponse{}, err
+		}
+		result.Date = &date
+	}
+	if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
+		return PageBlobClientGetPageRangesDiffResponse{}, err
+	}
+	return result, nil
+}
+
+// Resize - Resize the Blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
+// to a 512-byte boundary.
+// options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
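+//
+// A minimal call sketch (illustrative; 8192 is a hypothetical, 512-byte-aligned
+// capacity). Shrinking discards the pages beyond the new length:
+//
+//	resp, err := client.Resize(ctx, 8192, nil, nil, nil, nil, nil)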
+func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) {
+	req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+	if err != nil {
+		return PageBlobClientResizeResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return PageBlobClientResizeResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.resizeHandleResponse(resp)
+}
+
+// resizeCreateRequest creates the Resize request.
+func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "properties")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+	}
+	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// resizeHandleResponse handles the Resize response.
+func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) {
+	result := PageBlobClientResizeResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientResizeResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return PageBlobClientResizeResponse{}, err
+		}
+		result.BlobSequenceNumber = &blobSequenceNumber
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientResizeResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// UpdateSequenceNumber - Update the sequence number of the blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2020-10-02
+// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to
+// page blobs only. This property indicates how the service should modify the blob's sequence number
+// options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) {
+	req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions)
+	if err != nil {
+		return PageBlobClientUpdateSequenceNumberResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return PageBlobClientUpdateSequenceNumberResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.updateSequenceNumberHandleResponse(resp)
+}
+
+// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request.
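+//
+// A minimal call sketch (illustrative). The action selects how the service
+// interprets x-ms-blob-sequence-number ("max", "update", or "increment";
+// "increment" must omit the number):
+//
+//	seq := int64(7)
+//	resp, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionTypeUpdate,
+//		&PageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &seq}, nil, nil)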
+func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "properties")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	}
+	req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)}
+	if options != nil && options.BlobSequenceNumber != nil {
+		req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response.
+func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { + result := PageBlobClientUpdateSequenceNumberResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesHandleResponse(resp) +} + +// uploadPagesCreateRequest creates the UploadPages request. 
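+// It issues a PUT with comp=page and x-ms-page-write: update, so body must
+// carry exactly the page contents declared by Content-Length (page ranges are
+// 512-byte aligned). A minimal caller sketch for the exported method
+// (hypothetical data, assuming the azcore/streaming and azcore/to helpers; not
+// part of the generated code):
+//
+//    pageData := make([]byte, 512)
+//    body := streaming.NopCloser(bytes.NewReader(pageData))
+//    _, err := client.UploadPages(ctx, int64(len(pageData)), body,
+//        &PageBlobClientUploadPagesOptions{Range: to.Ptr("bytes=0-511")},
+//        nil, nil, nil, nil, nil)
+//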
+func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { + result := PageBlobClientUploadPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceURL - Specify a URL to the copy source. +// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// contentLength - The length of the request. +// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. 
+// options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesFromURLHandleResponse(resp) +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
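+// The service reads the source range directly from sourceURL via
+// x-ms-copy-source, so the request itself carries no body; sourceRange and
+// rangeParam both use the bytes=start-end form and must describe ranges of
+// equal, 512-aligned length. A minimal caller sketch for the exported method
+// (hypothetical SAS URL; not part of the generated code):
+//
+//    _, err := client.UploadPagesFromURL(ctx,
+//        "https://src.blob.core.windows.net/c/blob?<sas>", "bytes=0-511",
+//        0, // contentLength: zero, since no body accompanies the request
+//        "bytes=0-511", nil, nil, nil, nil, nil, nil, nil)
+//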
+func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
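+// As in uploadPagesHandleResponse, the Content-MD5 and x-ms-content-crc64
+// checksums arrive base64-encoded and are decoded into []byte fields, and
+// x-ms-request-server-encrypted is parsed with strconv.ParseBool.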
+func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { + result := PageBlobClientUploadPagesFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go similarity index 70% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 60c1c0c34..3d2d5d2f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -5,23 +5,18 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( - "net/http" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" "time" ) -// appendBlobClientAppendBlockFromURLResponse contains the response from method appendBlobClient.AppendBlockFromURL. -type appendBlobClientAppendBlockFromURLResponse struct { - appendBlobClientAppendBlockFromURLResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// appendBlobClientAppendBlockFromURLResult contains the result from method appendBlobClient.AppendBlockFromURL. -type appendBlobClientAppendBlockFromURLResult struct { +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. BlobAppendOffset *string @@ -35,7 +30,7 @@ type appendBlobClientAppendBlockFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -59,15 +54,8 @@ type appendBlobClientAppendBlockFromURLResult struct { XMSContentCRC64 []byte } -// appendBlobClientAppendBlockResponse contains the response from method appendBlobClient.AppendBlock. -type appendBlobClientAppendBlockResponse struct { - appendBlobClientAppendBlockResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// appendBlobClientAppendBlockResult contains the result from method appendBlobClient.AppendBlock. -type appendBlobClientAppendBlockResult struct { +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. BlobAppendOffset *string @@ -84,7 +72,7 @@ type appendBlobClientAppendBlockResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -108,15 +96,8 @@ type appendBlobClientAppendBlockResult struct { XMSContentCRC64 []byte } -// appendBlobClientCreateResponse contains the response from method appendBlobClient.Create. -type appendBlobClientCreateResponse struct { - appendBlobClientCreateResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// appendBlobClientCreateResult contains the result from method appendBlobClient.Create. -type appendBlobClientCreateResult struct { +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -127,7 +108,7 @@ type appendBlobClientCreateResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -151,15 +132,8 @@ type appendBlobClientCreateResult struct { VersionID *string } -// appendBlobClientSealResponse contains the response from method appendBlobClient.Seal. -type appendBlobClientSealResponse struct { - appendBlobClientSealResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// appendBlobClientSealResult contains the result from method appendBlobClient.Seal. 
-type appendBlobClientSealResult struct { +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -167,7 +141,7 @@ type appendBlobClientSealResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // IsSealed contains the information returned from the x-ms-blob-sealed header response. IsSealed *bool @@ -182,15 +156,8 @@ type appendBlobClientSealResult struct { Version *string } -// blobClientAbortCopyFromURLResponse contains the response from method blobClient.AbortCopyFromURL. -type blobClientAbortCopyFromURLResponse struct { - blobClientAbortCopyFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientAbortCopyFromURLResult contains the result from method blobClient.AbortCopyFromURL. -type blobClientAbortCopyFromURLResult struct { +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -204,15 +171,8 @@ type blobClientAbortCopyFromURLResult struct { Version *string } -// blobClientAcquireLeaseResponse contains the response from method blobClient.AcquireLease. -type blobClientAcquireLeaseResponse struct { - blobClientAcquireLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientAcquireLeaseResult contains the result from method blobClient.AcquireLease. -type blobClientAcquireLeaseResult struct { +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -220,7 +180,7 @@ type blobClientAcquireLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -235,15 +195,8 @@ type blobClientAcquireLeaseResult struct { Version *string } -// blobClientBreakLeaseResponse contains the response from method blobClient.BreakLease. -type blobClientBreakLeaseResponse struct { - blobClientBreakLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientBreakLeaseResult contains the result from method blobClient.BreakLease. -type blobClientBreakLeaseResult struct { +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -251,7 +204,7 @@ type blobClientBreakLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time @@ -266,15 +219,8 @@ type blobClientBreakLeaseResult struct { Version *string } -// blobClientChangeLeaseResponse contains the response from method blobClient.ChangeLease. -type blobClientChangeLeaseResponse struct { - blobClientChangeLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientChangeLeaseResult contains the result from method blobClient.ChangeLease. -type blobClientChangeLeaseResult struct { +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -282,7 +228,7 @@ type blobClientChangeLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -297,15 +243,8 @@ type blobClientChangeLeaseResult struct { Version *string } -// blobClientCopyFromURLResponse contains the response from method blobClient.CopyFromURL. -type blobClientCopyFromURLResponse struct { - blobClientCopyFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientCopyFromURLResult contains the result from method blobClient.CopyFromURL. -type blobClientCopyFromURLResult struct { +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -322,7 +261,7 @@ type blobClientCopyFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -340,15 +279,8 @@ type blobClientCopyFromURLResult struct { XMSContentCRC64 []byte } -// blobClientCreateSnapshotResponse contains the response from method blobClient.CreateSnapshot. -type blobClientCreateSnapshotResponse struct { - blobClientCreateSnapshotResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientCreateSnapshotResult contains the result from method blobClient.CreateSnapshot. -type blobClientCreateSnapshotResult struct { +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -356,7 +288,7 @@ type blobClientCreateSnapshotResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. IsServerEncrypted *bool @@ -377,15 +309,8 @@ type blobClientCreateSnapshotResult struct { VersionID *string } -// blobClientDeleteImmutabilityPolicyResponse contains the response from method blobClient.DeleteImmutabilityPolicy. -type blobClientDeleteImmutabilityPolicyResponse struct { - blobClientDeleteImmutabilityPolicyResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// blobClientDeleteImmutabilityPolicyResult contains the result from method blobClient.DeleteImmutabilityPolicy. -type blobClientDeleteImmutabilityPolicyResult struct { +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -399,15 +324,8 @@ type blobClientDeleteImmutabilityPolicyResult struct { Version *string } -// blobClientDeleteResponse contains the response from method blobClient.Delete. -type blobClientDeleteResponse struct { - blobClientDeleteResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientDeleteResult contains the result from method blobClient.Delete. -type blobClientDeleteResult struct { +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -421,15 +339,8 @@ type blobClientDeleteResult struct { Version *string } -// blobClientDownloadResponse contains the response from method blobClient.Download. -type blobClientDownloadResponse struct { - blobClientDownloadResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientDownloadResult contains the result from method blobClient.Download. -type blobClientDownloadResult struct { +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { // AcceptRanges contains the information returned from the Accept-Ranges header response. AcceptRanges *string @@ -445,6 +356,9 @@ type blobClientDownloadResult struct { // BlobType contains the information returned from the x-ms-blob-type header response. BlobType *BlobType + // Body contains the streaming response. + Body io.ReadCloser + // CacheControl contains the information returned from the Cache-Control header response. CacheControl *string @@ -497,7 +411,7 @@ type blobClientDownloadResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -512,7 +426,7 @@ type blobClientDownloadResult struct { ImmutabilityPolicyExpiresOn *time.Time // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicyMode // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. IsCurrentVersion *bool @@ -563,15 +477,8 @@ type blobClientDownloadResult struct { VersionID *string } -// blobClientGetAccountInfoResponse contains the response from method blobClient.GetAccountInfo. -type blobClientGetAccountInfoResponse struct { - blobClientGetAccountInfoResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientGetAccountInfoResult contains the result from method blobClient.GetAccountInfo. 
-type blobClientGetAccountInfoResult struct { +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { // AccountKind contains the information returned from the x-ms-account-kind header response. AccountKind *AccountKind @@ -591,15 +498,8 @@ type blobClientGetAccountInfoResult struct { Version *string } -// blobClientGetPropertiesResponse contains the response from method blobClient.GetProperties. -type blobClientGetPropertiesResponse struct { - blobClientGetPropertiesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientGetPropertiesResult contains the result from method blobClient.GetProperties. -type blobClientGetPropertiesResult struct { +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { // AcceptRanges contains the information returned from the Accept-Ranges header response. AcceptRanges *string @@ -676,7 +576,7 @@ type blobClientGetPropertiesResult struct { DestinationSnapshot *string // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -691,7 +591,7 @@ type blobClientGetPropertiesResult struct { ImmutabilityPolicyExpiresOn *time.Time // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicyMode // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. IsCurrentVersion *bool @@ -748,15 +648,8 @@ type blobClientGetPropertiesResult struct { VersionID *string } -// blobClientGetTagsResponse contains the response from method blobClient.GetTags. -type blobClientGetTagsResponse struct { - blobClientGetTagsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientGetTagsResult contains the result from method blobClient.GetTags. -type blobClientGetTagsResult struct { +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { BlobTags // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -771,15 +664,8 @@ type blobClientGetTagsResult struct { Version *string `xml:"Version"` } -// blobClientQueryResponse contains the response from method blobClient.Query. -type blobClientQueryResponse struct { - blobClientQueryResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientQueryResult contains the result from method blobClient.Query. -type blobClientQueryResult struct { +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { // AcceptRanges contains the information returned from the Accept-Ranges header response. AcceptRanges *string @@ -795,6 +681,9 @@ type blobClientQueryResult struct { // BlobType contains the information returned from the x-ms-blob-type header response. BlobType *BlobType + // Body contains the streaming response. + Body io.ReadCloser + // CacheControl contains the information returned from the Cache-Control header response. 
CacheControl *string @@ -847,7 +736,7 @@ type blobClientQueryResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -880,15 +769,8 @@ type blobClientQueryResult struct { Version *string } -// blobClientReleaseLeaseResponse contains the response from method blobClient.ReleaseLease. -type blobClientReleaseLeaseResponse struct { - blobClientReleaseLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientReleaseLeaseResult contains the result from method blobClient.ReleaseLease. -type blobClientReleaseLeaseResult struct { +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -896,7 +778,7 @@ type blobClientReleaseLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -908,15 +790,8 @@ type blobClientReleaseLeaseResult struct { Version *string } -// blobClientRenewLeaseResponse contains the response from method blobClient.RenewLease. -type blobClientRenewLeaseResponse struct { - blobClientRenewLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientRenewLeaseResult contains the result from method blobClient.RenewLease. -type blobClientRenewLeaseResult struct { +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobClientRenewLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -924,7 +799,7 @@ type blobClientRenewLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -939,15 +814,8 @@ type blobClientRenewLeaseResult struct { Version *string } -// blobClientSetExpiryResponse contains the response from method blobClient.SetExpiry. -type blobClientSetExpiryResponse struct { - blobClientSetExpiryResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetExpiryResult contains the result from method blobClient.SetExpiry. -type blobClientSetExpiryResult struct { +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -955,7 +823,7 @@ type blobClientSetExpiryResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time @@ -967,15 +835,8 @@ type blobClientSetExpiryResult struct { Version *string } -// blobClientSetHTTPHeadersResponse contains the response from method blobClient.SetHTTPHeaders. -type blobClientSetHTTPHeadersResponse struct { - blobClientSetHTTPHeadersResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetHTTPHeadersResult contains the result from method blobClient.SetHTTPHeaders. -type blobClientSetHTTPHeadersResult struct { +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -986,7 +847,7 @@ type blobClientSetHTTPHeadersResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -998,15 +859,8 @@ type blobClientSetHTTPHeadersResult struct { Version *string } -// blobClientSetImmutabilityPolicyResponse contains the response from method blobClient.SetImmutabilityPolicy. -type blobClientSetImmutabilityPolicyResponse struct { - blobClientSetImmutabilityPolicyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetImmutabilityPolicyResult contains the result from method blobClient.SetImmutabilityPolicy. -type blobClientSetImmutabilityPolicyResult struct { +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1017,7 +871,7 @@ type blobClientSetImmutabilityPolicyResult struct { ImmutabilityPolicyExpiry *time.Time // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *ImmutabilityPolicyMode // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1026,15 +880,8 @@ type blobClientSetImmutabilityPolicyResult struct { Version *string } -// blobClientSetLegalHoldResponse contains the response from method blobClient.SetLegalHold. -type blobClientSetLegalHoldResponse struct { - blobClientSetLegalHoldResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetLegalHoldResult contains the result from method blobClient.SetLegalHold. -type blobClientSetLegalHoldResult struct { +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1051,15 +898,8 @@ type blobClientSetLegalHoldResult struct { Version *string } -// blobClientSetMetadataResponse contains the response from method blobClient.SetMetadata. -type blobClientSetMetadataResponse struct { - blobClientSetMetadataResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetMetadataResult contains the result from method blobClient.SetMetadata. 
-type blobClientSetMetadataResult struct { +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1067,7 +907,7 @@ type blobClientSetMetadataResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1091,15 +931,8 @@ type blobClientSetMetadataResult struct { VersionID *string } -// blobClientSetTagsResponse contains the response from method blobClient.SetTags. -type blobClientSetTagsResponse struct { - blobClientSetTagsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetTagsResult contains the result from method blobClient.SetTags. -type blobClientSetTagsResult struct { +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1113,15 +946,8 @@ type blobClientSetTagsResult struct { Version *string } -// blobClientSetTierResponse contains the response from method blobClient.SetTier. -type blobClientSetTierResponse struct { - blobClientSetTierResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientSetTierResult contains the result from method blobClient.SetTier. -type blobClientSetTierResult struct { +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1132,15 +958,8 @@ type blobClientSetTierResult struct { Version *string } -// blobClientStartCopyFromURLResponse contains the response from method blobClient.StartCopyFromURL. -type blobClientStartCopyFromURLResponse struct { - blobClientStartCopyFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientStartCopyFromURLResult contains the result from method blobClient.StartCopyFromURL. -type blobClientStartCopyFromURLResult struct { +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1154,7 +973,7 @@ type blobClientStartCopyFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1169,15 +988,8 @@ type blobClientStartCopyFromURLResult struct { VersionID *string } -// blobClientUndeleteResponse contains the response from method blobClient.Undelete. -type blobClientUndeleteResponse struct { - blobClientUndeleteResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blobClientUndeleteResult contains the result from method blobClient.Undelete. 
-type blobClientUndeleteResult struct { +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1191,15 +1003,8 @@ type blobClientUndeleteResult struct { Version *string } -// blockBlobClientCommitBlockListResponse contains the response from method blockBlobClient.CommitBlockList. -type blockBlobClientCommitBlockListResponse struct { - blockBlobClientCommitBlockListResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientCommitBlockListResult contains the result from method blockBlobClient.CommitBlockList. -type blockBlobClientCommitBlockListResult struct { +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1210,7 +1015,7 @@ type blockBlobClientCommitBlockListResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1237,15 +1042,8 @@ type blockBlobClientCommitBlockListResult struct { XMSContentCRC64 []byte } -// blockBlobClientGetBlockListResponse contains the response from method blockBlobClient.GetBlockList. -type blockBlobClientGetBlockListResponse struct { - blockBlobClientGetBlockListResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientGetBlockListResult contains the result from method blockBlobClient.GetBlockList. -type blockBlobClientGetBlockListResult struct { +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { BlockList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. BlobContentLength *int64 `xml:"BlobContentLength"` @@ -1260,7 +1058,7 @@ type blockBlobClientGetBlockListResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time `xml:"LastModified"` @@ -1272,15 +1070,8 @@ type blockBlobClientGetBlockListResult struct { Version *string `xml:"Version"` } -// blockBlobClientPutBlobFromURLResponse contains the response from method blockBlobClient.PutBlobFromURL. -type blockBlobClientPutBlobFromURLResponse struct { - blockBlobClientPutBlobFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientPutBlobFromURLResult contains the result from method blockBlobClient.PutBlobFromURL. -type blockBlobClientPutBlobFromURLResult struct { +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
ClientRequestID *string @@ -1291,7 +1082,7 @@ type blockBlobClientPutBlobFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1315,15 +1106,8 @@ type blockBlobClientPutBlobFromURLResult struct { VersionID *string } -// blockBlobClientStageBlockFromURLResponse contains the response from method blockBlobClient.StageBlockFromURL. -type blockBlobClientStageBlockFromURLResponse struct { - blockBlobClientStageBlockFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientStageBlockFromURLResult contains the result from method blockBlobClient.StageBlockFromURL. -type blockBlobClientStageBlockFromURLResult struct { +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1352,15 +1136,8 @@ type blockBlobClientStageBlockFromURLResult struct { XMSContentCRC64 []byte } -// blockBlobClientStageBlockResponse contains the response from method blockBlobClient.StageBlock. -type blockBlobClientStageBlockResponse struct { - blockBlobClientStageBlockResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientStageBlockResult contains the result from method blockBlobClient.StageBlock. -type blockBlobClientStageBlockResult struct { +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobClientStageBlockResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1389,15 +1166,8 @@ type blockBlobClientStageBlockResult struct { XMSContentCRC64 []byte } -// blockBlobClientUploadResponse contains the response from method blockBlobClient.Upload. -type blockBlobClientUploadResponse struct { - blockBlobClientUploadResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// blockBlobClientUploadResult contains the result from method blockBlobClient.Upload. -type blockBlobClientUploadResult struct { +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1408,7 +1178,7 @@ type blockBlobClientUploadResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -1432,15 +1202,8 @@ type blockBlobClientUploadResult struct { VersionID *string } -// containerClientAcquireLeaseResponse contains the response from method containerClient.AcquireLease. -type containerClientAcquireLeaseResponse struct { - containerClientAcquireLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientAcquireLeaseResult contains the result from method containerClient.AcquireLease. 
-type containerClientAcquireLeaseResult struct { +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1448,7 +1211,7 @@ type containerClientAcquireLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1463,15 +1226,8 @@ type containerClientAcquireLeaseResult struct { Version *string } -// containerClientBreakLeaseResponse contains the response from method containerClient.BreakLease. -type containerClientBreakLeaseResponse struct { - containerClientBreakLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientBreakLeaseResult contains the result from method containerClient.BreakLease. -type containerClientBreakLeaseResult struct { +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1479,7 +1235,7 @@ type containerClientBreakLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1494,15 +1250,8 @@ type containerClientBreakLeaseResult struct { Version *string } -// containerClientChangeLeaseResponse contains the response from method containerClient.ChangeLease. -type containerClientChangeLeaseResponse struct { - containerClientChangeLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientChangeLeaseResult contains the result from method containerClient.ChangeLease. -type containerClientChangeLeaseResult struct { +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1510,7 +1259,7 @@ type containerClientChangeLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1525,15 +1274,8 @@ type containerClientChangeLeaseResult struct { Version *string } -// containerClientCreateResponse contains the response from method containerClient.Create. -type containerClientCreateResponse struct { - containerClientCreateResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientCreateResult contains the result from method containerClient.Create. -type containerClientCreateResult struct { +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
ClientRequestID *string @@ -1541,7 +1283,7 @@ type containerClientCreateResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1553,15 +1295,8 @@ type containerClientCreateResult struct { Version *string } -// containerClientDeleteResponse contains the response from method containerClient.Delete. -type containerClientDeleteResponse struct { - containerClientDeleteResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientDeleteResult contains the result from method containerClient.Delete. -type containerClientDeleteResult struct { +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1575,15 +1310,8 @@ type containerClientDeleteResult struct { Version *string } -// containerClientGetAccessPolicyResponse contains the response from method containerClient.GetAccessPolicy. -type containerClientGetAccessPolicyResponse struct { - containerClientGetAccessPolicyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientGetAccessPolicyResult contains the result from method containerClient.GetAccessPolicy. -type containerClientGetAccessPolicyResult struct { +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` @@ -1594,7 +1322,7 @@ type containerClientGetAccessPolicyResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time `xml:"LastModified"` @@ -1609,15 +1337,8 @@ type containerClientGetAccessPolicyResult struct { Version *string `xml:"Version"` } -// containerClientGetAccountInfoResponse contains the response from method containerClient.GetAccountInfo. -type containerClientGetAccountInfoResponse struct { - containerClientGetAccountInfoResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientGetAccountInfoResult contains the result from method containerClient.GetAccountInfo. -type containerClientGetAccountInfoResult struct { +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { // AccountKind contains the information returned from the x-ms-account-kind header response. AccountKind *AccountKind @@ -1637,15 +1358,8 @@ type containerClientGetAccountInfoResult struct { Version *string } -// containerClientGetPropertiesResponse contains the response from method containerClient.GetProperties. -type containerClientGetPropertiesResponse struct { - containerClientGetPropertiesResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// containerClientGetPropertiesResult contains the result from method containerClient.GetProperties. -type containerClientGetPropertiesResult struct { +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. BlobPublicAccess *PublicAccessType @@ -1662,7 +1376,7 @@ type containerClientGetPropertiesResult struct { DenyEncryptionScopeOverride *bool // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. HasImmutabilityPolicy *bool @@ -1696,15 +1410,8 @@ type containerClientGetPropertiesResult struct { Version *string } -// containerClientListBlobFlatSegmentResponse contains the response from method containerClient.ListBlobFlatSegment. -type containerClientListBlobFlatSegmentResponse struct { - containerClientListBlobFlatSegmentResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientListBlobFlatSegmentResult contains the result from method containerClient.ListBlobFlatSegment. -type containerClientListBlobFlatSegmentResult struct { +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.ListBlobFlatSegment. +type ContainerClientListBlobFlatSegmentResponse struct { ListBlobsFlatSegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -1722,15 +1429,8 @@ type containerClientListBlobFlatSegmentResult struct { Version *string `xml:"Version"` } -// containerClientListBlobHierarchySegmentResponse contains the response from method containerClient.ListBlobHierarchySegment. -type containerClientListBlobHierarchySegmentResponse struct { - containerClientListBlobHierarchySegmentResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientListBlobHierarchySegmentResult contains the result from method containerClient.ListBlobHierarchySegment. -type containerClientListBlobHierarchySegmentResult struct { +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.ListBlobHierarchySegment. +type ContainerClientListBlobHierarchySegmentResponse struct { ListBlobsHierarchySegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -1748,15 +1448,8 @@ type containerClientListBlobHierarchySegmentResult struct { Version *string `xml:"Version"` } -// containerClientReleaseLeaseResponse contains the response from method containerClient.ReleaseLease. -type containerClientReleaseLeaseResponse struct { - containerClientReleaseLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientReleaseLeaseResult contains the result from method containerClient.ReleaseLease. -type containerClientReleaseLeaseResult struct { +// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. 
+type ContainerClientReleaseLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1764,7 +1457,7 @@ type containerClientReleaseLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1776,15 +1469,8 @@ type containerClientReleaseLeaseResult struct { Version *string } -// containerClientRenameResponse contains the response from method containerClient.Rename. -type containerClientRenameResponse struct { - containerClientRenameResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientRenameResult contains the result from method containerClient.Rename. -type containerClientRenameResult struct { +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. +type ContainerClientRenameResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1798,15 +1484,8 @@ type containerClientRenameResult struct { Version *string } -// containerClientRenewLeaseResponse contains the response from method containerClient.RenewLease. -type containerClientRenewLeaseResponse struct { - containerClientRenewLeaseResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientRenewLeaseResult contains the result from method containerClient.RenewLease. -type containerClientRenewLeaseResult struct { +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. +type ContainerClientRenewLeaseResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1814,7 +1493,7 @@ type containerClientRenewLeaseResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1829,15 +1508,8 @@ type containerClientRenewLeaseResult struct { Version *string } -// containerClientRestoreResponse contains the response from method containerClient.Restore. -type containerClientRestoreResponse struct { - containerClientRestoreResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientRestoreResult contains the result from method containerClient.Restore. -type containerClientRestoreResult struct { +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1851,15 +1523,8 @@ type containerClientRestoreResult struct { Version *string } -// containerClientSetAccessPolicyResponse contains the response from method containerClient.SetAccessPolicy. -type containerClientSetAccessPolicyResponse struct { - containerClientSetAccessPolicyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientSetAccessPolicyResult contains the result from method containerClient.SetAccessPolicy. 
-type containerClientSetAccessPolicyResult struct { +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1867,7 +1532,7 @@ type containerClientSetAccessPolicyResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1879,15 +1544,8 @@ type containerClientSetAccessPolicyResult struct { Version *string } -// containerClientSetMetadataResponse contains the response from method containerClient.SetMetadata. -type containerClientSetMetadataResponse struct { - containerClientSetMetadataResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// containerClientSetMetadataResult contains the result from method containerClient.SetMetadata. -type containerClientSetMetadataResult struct { +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1895,7 +1553,7 @@ type containerClientSetMetadataResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1907,15 +1565,11 @@ type containerClientSetMetadataResult struct { Version *string } -// containerClientSubmitBatchResponse contains the response from method containerClient.SubmitBatch. -type containerClientSubmitBatchResponse struct { - containerClientSubmitBatchResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. +type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser -// containerClientSubmitBatchResult contains the result from method containerClient.SubmitBatch. -type containerClientSubmitBatchResult struct { // ContentType contains the information returned from the Content-Type header response. ContentType *string @@ -1926,15 +1580,8 @@ type containerClientSubmitBatchResult struct { Version *string } -// pageBlobClientClearPagesResponse contains the response from method pageBlobClient.ClearPages. -type pageBlobClientClearPagesResponse struct { - pageBlobClientClearPagesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientClearPagesResult contains the result from method pageBlobClient.ClearPages. -type pageBlobClientClearPagesResult struct { +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -1948,7 +1595,7 @@ type pageBlobClientClearPagesResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. 
- ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1963,15 +1610,8 @@ type pageBlobClientClearPagesResult struct { XMSContentCRC64 []byte } -// pageBlobClientCopyIncrementalResponse contains the response from method pageBlobClient.CopyIncremental. -type pageBlobClientCopyIncrementalResponse struct { - pageBlobClientCopyIncrementalResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientCopyIncrementalResult contains the result from method pageBlobClient.CopyIncremental. -type pageBlobClientCopyIncrementalResult struct { +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -1985,7 +1625,7 @@ type pageBlobClientCopyIncrementalResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -1997,15 +1637,8 @@ type pageBlobClientCopyIncrementalResult struct { Version *string } -// pageBlobClientCreateResponse contains the response from method pageBlobClient.Create. -type pageBlobClientCreateResponse struct { - pageBlobClientCreateResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientCreateResult contains the result from method pageBlobClient.Create. -type pageBlobClientCreateResult struct { +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. +type PageBlobClientCreateResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -2016,7 +1649,7 @@ type pageBlobClientCreateResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -2040,15 +1673,8 @@ type pageBlobClientCreateResult struct { VersionID *string } -// pageBlobClientGetPageRangesDiffResponse contains the response from method pageBlobClient.GetPageRangesDiff. -type pageBlobClientGetPageRangesDiffResponse struct { - pageBlobClientGetPageRangesDiffResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientGetPageRangesDiffResult contains the result from method pageBlobClient.GetPageRangesDiff. -type pageBlobClientGetPageRangesDiffResult struct { +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.GetPageRangesDiff. +type PageBlobClientGetPageRangesDiffResponse struct { PageList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. BlobContentLength *int64 `xml:"BlobContentLength"` @@ -2060,7 +1686,7 @@ type pageBlobClientGetPageRangesDiffResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time `xml:"LastModified"` @@ -2072,15 +1698,8 @@ type pageBlobClientGetPageRangesDiffResult struct { Version *string `xml:"Version"` } -// pageBlobClientGetPageRangesResponse contains the response from method pageBlobClient.GetPageRanges. -type pageBlobClientGetPageRangesResponse struct { - pageBlobClientGetPageRangesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientGetPageRangesResult contains the result from method pageBlobClient.GetPageRanges. -type pageBlobClientGetPageRangesResult struct { +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.GetPageRanges. +type PageBlobClientGetPageRangesResponse struct { PageList // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. BlobContentLength *int64 `xml:"BlobContentLength"` @@ -2092,7 +1711,7 @@ type pageBlobClientGetPageRangesResult struct { Date *time.Time `xml:"Date"` // ETag contains the information returned from the ETag header response. - ETag *string `xml:"ETag"` + ETag *azcore.ETag `xml:"ETag"` // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time `xml:"LastModified"` @@ -2104,15 +1723,8 @@ type pageBlobClientGetPageRangesResult struct { Version *string `xml:"Version"` } -// pageBlobClientResizeResponse contains the response from method pageBlobClient.Resize. -type pageBlobClientResizeResponse struct { - pageBlobClientResizeResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientResizeResult contains the result from method pageBlobClient.Resize. -type pageBlobClientResizeResult struct { +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. +type PageBlobClientResizeResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2123,7 +1735,7 @@ type pageBlobClientResizeResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -2135,15 +1747,8 @@ type pageBlobClientResizeResult struct { Version *string } -// pageBlobClientUpdateSequenceNumberResponse contains the response from method pageBlobClient.UpdateSequenceNumber. -type pageBlobClientUpdateSequenceNumberResponse struct { - pageBlobClientUpdateSequenceNumberResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientUpdateSequenceNumberResult contains the result from method pageBlobClient.UpdateSequenceNumber. -type pageBlobClientUpdateSequenceNumberResult struct { +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2154,7 +1759,7 @@ type pageBlobClientUpdateSequenceNumberResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. 
LastModified *time.Time @@ -2166,15 +1771,8 @@ type pageBlobClientUpdateSequenceNumberResult struct { Version *string } -// pageBlobClientUploadPagesFromURLResponse contains the response from method pageBlobClient.UploadPagesFromURL. -type pageBlobClientUploadPagesFromURLResponse struct { - pageBlobClientUploadPagesFromURLResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientUploadPagesFromURLResult contains the result from method pageBlobClient.UploadPagesFromURL. -type pageBlobClientUploadPagesFromURLResult struct { +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2185,7 +1783,7 @@ type pageBlobClientUploadPagesFromURLResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -2209,15 +1807,8 @@ type pageBlobClientUploadPagesFromURLResult struct { XMSContentCRC64 []byte } -// pageBlobClientUploadPagesResponse contains the response from method pageBlobClient.UploadPages. -type pageBlobClientUploadPagesResponse struct { - pageBlobClientUploadPagesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// pageBlobClientUploadPagesResult contains the result from method pageBlobClient.UploadPages. -type pageBlobClientUploadPagesResult struct { +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. +type PageBlobClientUploadPagesResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 @@ -2231,7 +1822,7 @@ type pageBlobClientUploadPagesResult struct { Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *string + ETag *azcore.ETag // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. EncryptionKeySHA256 *string @@ -2255,15 +1846,8 @@ type pageBlobClientUploadPagesResult struct { XMSContentCRC64 []byte } -// serviceClientFilterBlobsResponse contains the response from method serviceClient.FilterBlobs. -type serviceClientFilterBlobsResponse struct { - serviceClientFilterBlobsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientFilterBlobsResult contains the result from method serviceClient.FilterBlobs. -type serviceClientFilterBlobsResult struct { +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. +type ServiceClientFilterBlobsResponse struct { FilterBlobSegment // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2278,15 +1862,8 @@ type serviceClientFilterBlobsResult struct { Version *string `xml:"Version"` } -// serviceClientGetAccountInfoResponse contains the response from method serviceClient.GetAccountInfo. -type serviceClientGetAccountInfoResponse struct { - serviceClientGetAccountInfoResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// serviceClientGetAccountInfoResult contains the result from method serviceClient.GetAccountInfo. -type serviceClientGetAccountInfoResult struct { +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { // AccountKind contains the information returned from the x-ms-account-kind header response. AccountKind *AccountKind @@ -2309,15 +1886,8 @@ type serviceClientGetAccountInfoResult struct { Version *string } -// serviceClientGetPropertiesResponse contains the response from method serviceClient.GetProperties. -type serviceClientGetPropertiesResponse struct { - serviceClientGetPropertiesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientGetPropertiesResult contains the result from method serviceClient.GetProperties. -type serviceClientGetPropertiesResult struct { +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { StorageServiceProperties // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2329,15 +1899,8 @@ type serviceClientGetPropertiesResult struct { Version *string `xml:"Version"` } -// serviceClientGetStatisticsResponse contains the response from method serviceClient.GetStatistics. -type serviceClientGetStatisticsResponse struct { - serviceClientGetStatisticsResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientGetStatisticsResult contains the result from method serviceClient.GetStatistics. -type serviceClientGetStatisticsResult struct { +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { StorageServiceStats // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2352,15 +1915,8 @@ type serviceClientGetStatisticsResult struct { Version *string `xml:"Version"` } -// serviceClientGetUserDelegationKeyResponse contains the response from method serviceClient.GetUserDelegationKey. -type serviceClientGetUserDelegationKeyResponse struct { - serviceClientGetUserDelegationKeyResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientGetUserDelegationKeyResult contains the result from method serviceClient.GetUserDelegationKey. -type serviceClientGetUserDelegationKeyResult struct { +// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type ServiceClientGetUserDelegationKeyResponse struct { UserDelegationKey // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2375,15 +1931,8 @@ type serviceClientGetUserDelegationKeyResult struct { Version *string `xml:"Version"` } -// serviceClientListContainersSegmentResponse contains the response from method serviceClient.ListContainersSegment. -type serviceClientListContainersSegmentResponse struct { - serviceClientListContainersSegmentResult - // RawResponse contains the underlying HTTP response. 
- RawResponse *http.Response -} - -// serviceClientListContainersSegmentResult contains the result from method serviceClient.ListContainersSegment. -type serviceClientListContainersSegmentResult struct { +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.ListContainersSegment. +type ServiceClientListContainersSegmentResponse struct { ListContainersSegmentResponse // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string `xml:"ClientRequestID"` @@ -2395,15 +1944,8 @@ type serviceClientListContainersSegmentResult struct { Version *string `xml:"Version"` } -// serviceClientSetPropertiesResponse contains the response from method serviceClient.SetProperties. -type serviceClientSetPropertiesResponse struct { - serviceClientSetPropertiesResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} - -// serviceClientSetPropertiesResult contains the result from method serviceClient.SetProperties. -type serviceClientSetPropertiesResult struct { +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string @@ -2414,15 +1956,11 @@ type serviceClientSetPropertiesResult struct { Version *string } -// serviceClientSubmitBatchResponse contains the response from method serviceClient.SubmitBatch. -type serviceClientSubmitBatchResponse struct { - serviceClientSubmitBatchResult - // RawResponse contains the underlying HTTP response. - RawResponse *http.Response -} +// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. +type ServiceClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser -// serviceClientSubmitBatchResult contains the result from method serviceClient.SubmitBatch. -type serviceClientSubmitBatchResult struct { // ContentType contains the information returned from the Content-Type header response. ContentType *string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go similarity index 62% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index 7dcf6ef13..1cb779d84 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "context" @@ -20,16 +21,18 @@ import ( "time" ) -type serviceClient struct { +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use NewServiceClient() instead. +type ServiceClient struct { endpoint string pl runtime.Pipeline } -// newServiceClient creates a new instance of serviceClient with the specified values. 
+// NewServiceClient creates a new instance of ServiceClient with the specified values. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // pl - the pipeline used for sending requests and handling responses. -func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { - client := &serviceClient{ +func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { + client := &ServiceClient{ endpoint: endpoint, pl: pl, } @@ -40,24 +43,25 @@ func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { // expression. Filter blobs searches across all containers within a storage account but can // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. -func (client *serviceClient) FilterBlobs(ctx context.Context, options *serviceClientFilterBlobsOptions) (serviceClientFilterBlobsResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { req, err := client.filterBlobsCreateRequest(ctx, options) if err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) } return client.filterBlobsHandleResponse(resp) } // filterBlobsCreateRequest creates the FilterBlobs request. -func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *serviceClientFilterBlobsOptions) (*policy.Request, error) { +func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -77,17 +81,17 @@ func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, optio reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // filterBlobsHandleResponse handles the FilterBlobs response. 
-func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (serviceClientFilterBlobsResponse, error) { - result := serviceClientFilterBlobsResponse{RawResponse: resp} +func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) { + result := ServiceClientFilterBlobsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -100,36 +104,37 @@ func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (ser if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { - return serviceClientFilterBlobsResponse{}, err + return ServiceClientFilterBlobsResponse{}, err } return result, nil } // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. -func (client *serviceClient) GetAccountInfo(ctx context.Context, options *serviceClientGetAccountInfoOptions) (serviceClientGetAccountInfoResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) } return client.getAccountInfoHandleResponse(resp) } // getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *serviceClientGetAccountInfoOptions) (*policy.Request, error) { +func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -138,14 +143,14 @@ func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, op reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. 
-func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (serviceClientGetAccountInfoResponse, error) { - result := serviceClientGetAccountInfoResponse{RawResponse: resp} +func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { + result := ServiceClientGetAccountInfoResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -158,7 +163,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) ( if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } result.Date = &date } @@ -171,7 +176,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { - return serviceClientGetAccountInfoResponse{}, err + return ServiceClientGetAccountInfoResponse{}, err } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } @@ -181,24 +186,25 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) ( // GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. -func (client *serviceClient) GetProperties(ctx context.Context, options *serviceClientGetPropertiesOptions) (serviceClientGetPropertiesResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { - return serviceClientGetPropertiesResponse{}, err + return ServiceClientGetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetPropertiesResponse{}, err + return ServiceClientGetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.getPropertiesHandleResponse(resp) } // getPropertiesCreateRequest creates the GetProperties request. 
-func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *serviceClientGetPropertiesOptions) (*policy.Request, error) { +func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -210,17 +216,17 @@ func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. -func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (serviceClientGetPropertiesResponse, error) { - result := serviceClientGetPropertiesResponse{RawResponse: resp} +func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -231,7 +237,7 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (s result.Version = &val } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { - return serviceClientGetPropertiesResponse{}, err + return ServiceClientGetPropertiesResponse{}, err } return result, nil } @@ -239,24 +245,25 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (s // GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. -func (client *serviceClient) GetStatistics(ctx context.Context, options *serviceClientGetStatisticsOptions) (serviceClientGetStatisticsResponse, error) { +// Generated from API version 2020-10-02 +// options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. 
+func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) } return client.getStatisticsHandleResponse(resp) } // getStatisticsCreateRequest creates the GetStatistics request. -func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *serviceClientGetStatisticsOptions) (*policy.Request, error) { +func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -268,17 +275,17 @@ func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // getStatisticsHandleResponse handles the GetStatistics response. -func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (serviceClientGetStatisticsResponse, error) { - result := serviceClientGetStatisticsResponse{RawResponse: resp} +func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { + result := ServiceClientGetStatisticsResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -291,12 +298,12 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (s if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { - return serviceClientGetStatisticsResponse{}, err + return ServiceClientGetStatisticsResponse{}, err } return result, nil } @@ -304,26 +311,27 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (s // GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 // keyInfo - Key information -// options - serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey +// options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. -func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (serviceClientGetUserDelegationKeyResponse, error) { +func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) } return client.getUserDelegationKeyHandleResponse(resp) } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. -func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (*policy.Request, error) { +func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -335,17 +343,17 @@ func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, runtime.MarshalAsXML(req, keyInfo) } // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. 
-func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (serviceClientGetUserDelegationKeyResponse, error) { - result := serviceClientGetUserDelegationKeyResponse{RawResponse: resp} +func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) { + result := ServiceClientGetUserDelegationKeyResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -358,34 +366,24 @@ func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } result.Date = &date } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { - return serviceClientGetUserDelegationKeyResponse{}, err + return ServiceClientGetUserDelegationKeyResponse{}, err } return result, nil } -// ListContainersSegment - The List Containers Segment operation returns a list of the containers under the specified account +// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified +// account // If the operation fails it returns an *azcore.ResponseError type. -// options - serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// Generated from API version 2020-10-02 +// options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment // method. -func (client *serviceClient) ListContainersSegment(options *serviceClientListContainersSegmentOptions) *serviceClientListContainersSegmentPager { - return &serviceClientListContainersSegmentPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.listContainersSegmentCreateRequest(ctx, options) - }, - advancer: func(ctx context.Context, resp serviceClientListContainersSegmentResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.ListContainersSegmentResponse.NextMarker) - }, - } -} - // listContainersSegmentCreateRequest creates the ListContainersSegment request. -func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *serviceClientListContainersSegmentOptions) (*policy.Request, error) { +func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -408,17 +406,17 @@ func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // listContainersSegmentHandleResponse handles the ListContainersSegment response. 
-func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (serviceClientListContainersSegmentResponse, error) { - result := serviceClientListContainersSegmentResponse{RawResponse: resp} +func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) { + result := ServiceClientListContainersSegmentResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -429,7 +427,7 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp result.Version = &val } if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { - return serviceClientListContainersSegmentResponse{}, err + return ServiceClientListContainersSegmentResponse{}, err } return result, nil } @@ -437,25 +435,26 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp // SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // storageServiceProperties - The StorageService properties. -// options - serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. -func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (serviceClientSetPropertiesResponse, error) { +// options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { - return serviceClientSetPropertiesResponse{}, err + return ServiceClientSetPropertiesResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientSetPropertiesResponse{}, err + return ServiceClientSetPropertiesResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return serviceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) } return client.setPropertiesHandleResponse(resp) } // setPropertiesCreateRequest creates the SetProperties request. 
-func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (*policy.Request, error) { +func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -467,17 +466,17 @@ func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, sto reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") + req.Raw().Header["Accept"] = []string{"application/xml"} return req, runtime.MarshalAsXML(req, storageServiceProperties) } // setPropertiesHandleResponse handles the SetProperties response. -func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (serviceClientSetPropertiesResponse, error) { - result := serviceClientSetPropertiesResponse{RawResponse: resp} +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } @@ -492,28 +491,29 @@ func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (s // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 // contentLength - The length of the request. // multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // body - Initial data -// options - serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. -func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (serviceClientSubmitBatchResponse, error) { +// options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
+func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { - return serviceClientSubmitBatchResponse{}, err + return ServiceClientSubmitBatchResponse{}, err } resp, err := client.pl.Do(req) if err != nil { - return serviceClientSubmitBatchResponse{}, err + return ServiceClientSubmitBatchResponse{}, err } if !runtime.HasStatusCode(resp, http.StatusOK) { - return serviceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) } return client.submitBatchHandleResponse(resp) } // submitBatchCreateRequest creates the SubmitBatch request. -func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (*policy.Request, error) { +func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -525,19 +525,19 @@ func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, conte } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("Content-Type", multipartContentType) - req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} if options != nil && options.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header.Set("Accept", "application/xml") - return req, runtime.MarshalAsXML(req, body) + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/xml") } // submitBatchHandleResponse handles the SubmitBatch response. 
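Worth noting in the hunks above: every `req.Raw().Header.Set(...)` call became a direct map assignment. `http.Header.Set` canonicalizes key names, while indexing the map stores the key byte-for-byte, which matters when a service or signing scheme compares header names exactly. A small, self-contained illustration of the difference:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("x-ms-version", "2020-10-02")           // canonicalized to "X-Ms-Version"
	h["x-ms-client-request-id"] = []string{"abc"} // key stored verbatim
	for k := range h {
		fmt.Println(k)
	}
	// Output (order may vary):
	// X-Ms-Version
	// x-ms-client-request-id
}
```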
-func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (serviceClientSubmitBatchResponse, error) { - result := serviceClientSubmitBatchResponse{RawResponse: resp} +func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) { + result := ServiceClientSubmitBatchResponse{Body: resp.Body} if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 42726159b..4b4d51aa3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "strings" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go similarity index 97% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index c51d8d78c..1ce9d6211 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. -package azblob +package generated import ( "regexp" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 1cf97387d..144ea18e1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -5,8 +5,9 @@ // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
-package azblob
+package generated
 
 import (
 	"encoding/xml"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
new file mode 100644
index 000000000..a86fc582c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
@@ -0,0 +1,78 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package shared
+
+import (
+	"context"
+	"errors"
+)
+
+// BatchTransferOptions identifies options used by DoBatchTransfer.
+type BatchTransferOptions struct {
+	TransferSize  int64
+	ChunkSize     int64
+	Concurrency   uint16
+	Operation     func(offset int64, chunkSize int64, ctx context.Context) error
+	OperationName string
+}
+
+// DoBatchTransfer helps execute operations in batches.
+// It can be used to implement custom batch operations for scenarios the SDK does not cover.
+func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
+	if o.ChunkSize == 0 {
+		return errors.New("ChunkSize cannot be 0")
+	}
+
+	if o.Concurrency == 0 {
+		o.Concurrency = 5 // default concurrency
+	}
+
+	// Prepare and do parallel operations.
+	numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
+	operationChannel := make(chan func() error, o.Concurrency) // Buffered channel that feeds work to the 'concurrency' goroutines
+	operationResponseChannel := make(chan error, numChunks)    // Holds each response
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Create the goroutines that process each operation (in parallel).
+	for g := uint16(0); g < o.Concurrency; g++ {
+		go func() {
+			for f := range operationChannel {
+				err := f()
+				operationResponseChannel <- err
+			}
+		}()
+	}
+
+	// Add each chunk's operation to the channel.
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		curChunkSize := o.ChunkSize
+
+		if chunkNum == numChunks-1 { // Last chunk
+			curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
+		}
+		offset := int64(chunkNum) * o.ChunkSize
+
+		operationChannel <- func() error {
+			return o.Operation(offset, curChunkSize, ctx)
+		}
+	}
+	close(operationChannel)
+
+	// Wait for the operations to complete.
+ var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go similarity index 74% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go index 14c7feda1..8d4d35bde 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go @@ -2,9 +2,9 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package shared import ( "errors" @@ -12,7 +12,7 @@ import ( type bytesWriter []byte -func newBytesWriter(b []byte) bytesWriter { +func NewBytesWriter(b []byte) bytesWriter { return b } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go new file mode 100644 index 000000000..c8528a2e3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go new file mode 100644 index 000000000..48a5ad842 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -0,0 +1,238 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "fmt" + "io" + "net" + "net/url" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" +) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +type ParsedConnectionString struct { + ServiceURL string + AccountName string + AccountKey string +} + +func ParseConnectionString(connectionString string) (ParsedConnectionString, error) { + const ( + defaultScheme = "https" + defaultSuffix = "core.windows.net" + ) + + connStrMap := make(map[string]string) + connectionString = strings.TrimRight(connectionString, ";") + + splitString := strings.Split(connectionString, ";") + if len(splitString) == 0 { + return ParsedConnectionString{}, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ParsedConnectionString{}, errConnectionString + } + connStrMap[parts[0]] = parts[1] + } + + accountName, ok := connStrMap["AccountName"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountName") + } + + accountKey, ok := connStrMap["AccountKey"] + if !ok { + sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") + } + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), + }, nil + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok { + return ParsedConnectionString{ + ServiceURL: blobEndpoint, + AccountName: accountName, + AccountKey: accountKey, + }, nil + } + + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), + AccountName: accountName, + AccountKey: accountKey, + }, nil +} + +// SerializeBlobTags converts tags to generated.BlobTags +func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { + if tagsMap == nil { + return nil + } + blobTagSet := make([]*generated.BlobTag, 0) + for key, val := range tagsMap { + newKey, newVal := key, val + blobTagSet = append(blobTagSet, &generated.BlobTag{Key: &newKey, Value: 
&newVal})
+	}
+	return &generated.BlobTags{BlobTagSet: blobTagSet}
+}
+
+func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
+	if tagsMap == nil {
+		return nil
+	}
+	tags := make([]string, 0)
+	for key, val := range tagsMap {
+		tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+	}
+	blobTagsString := strings.Join(tags, "&")
+	return &blobTagsString
+}
+
+func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+		return 0, nil
+	}
+
+	err := validateSeekableStreamAt0(body)
+	if err != nil {
+		return 0, err
+	}
+
+	count, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, errors.New("body stream must be seekable")
+	}
+
+	_, err = body.Seek(0, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
+// validateSeekableStreamAt0 returns an error if body is not a seekable stream positioned at 0
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+	if body == nil { // nil bodies are "logically" seekable to 0
+		return nil
+	}
+	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+		// Help detect programmer error
+		if err != nil {
+			return errors.New("body stream must be seekable")
+		}
+		return errors.New("body stream must be set to position 0")
+	}
+	return nil
+}
+
+func RangeToString(offset, count int64) string {
+	return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10)
+}
+
+type nopCloser struct {
+	io.ReadSeeker
+}
+
+func (n nopCloser) Close() error {
+	return nil
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+	return nopCloser{rs}
+}
+
+func GenerateLeaseID(leaseID *string) (*string, error) {
+	if leaseID == nil {
+		generatedUuid, err := uuid.New()
+		if err != nil {
+			return nil, err
+		}
+		leaseID = to.Ptr(generatedUuid.String())
+	}
+	return leaseID, nil
+}
+
+func GetClientOptions[T any](o *T) *T {
+	if o == nil {
+		return new(T)
+	}
+	return o
+}
+
+// IsIPEndpointStyle checks whether the URL's host is an IP address; in that case the storage account endpoint will be composed as:
+// http(s)://IP(:port)/storageaccount/container/...
+// As with a URL's Host property, host may be either host or host:port
+func IsIPEndpointStyle(host string) bool {
+	if host == "" {
+		return false
+	}
+	if h, _, err := net.SplitHostPort(host); err == nil {
+		host = h
+	}
+	// For IPv6 there could be a case where SplitHostPort fails because it cannot find a port.
+	// In this case, eliminate the '[' and ']' in the URL.
+	// For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
+	if host[0] == '[' && host[len(host)-1] == ']' {
+		host = host[1 : len(host)-1]
+	}
+	return net.ParseIP(host) != nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go
similarity index 96%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go
index 5c40e9bc2..f3c9d4be7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go
@@ -2,15 +2,17 @@
 // +build go1.18
 
 // Copyright (c) Microsoft Corporation.
All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package shared import ( "fmt" "sync" ) +const _1MiB = 1024 * 1024 + // TransferManager provides a buffer and thread pool manager for certain transfer options. // It is undefined behavior if code outside this package call any of these methods. type TransferManager interface { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go deleted file mode 100644 index cd2ada9b5..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go +++ /dev/null @@ -1,150 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "strconv" - "time" -) - -// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. -type CtxWithHTTPHeaderKey struct{} - -// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. -type CtxWithRetryOptionsKey struct{} - -type nopCloser struct { - io.ReadSeeker -} - -func (n nopCloser) Close() error { - return nil -} - -// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. -func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { - return nopCloser{rs} -} - -// BodyDownloadPolicyOpValues is the struct containing the per-operation values -type BodyDownloadPolicyOpValues struct { - Skip bool -} - -func NewResponseError(inner error, resp *http.Response) error { - return &ResponseError{inner: inner, resp: resp} -} - -type ResponseError struct { - inner error - resp *http.Response -} - -// Error implements the error interface for type ResponseError. -func (e *ResponseError) Error() string { - return e.inner.Error() -} - -// Unwrap returns the inner error. -func (e *ResponseError) Unwrap() error { - return e.inner -} - -// RawResponse returns the HTTP response associated with this error. -func (e *ResponseError) RawResponse() *http.Response { - return e.resp -} - -// NonRetriable indicates this error is non-transient. -func (e *ResponseError) NonRetriable() { - // marker method -} - -// Delay waits for the duration to elapse or the context to be cancelled. -func Delay(ctx context.Context, delay time.Duration) error { - select { - case <-time.After(delay): - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// ErrNoBody is returned if the response didn't contain a body. -var ErrNoBody = errors.New("the response did not contain a body") - -// GetJSON reads the response body into a raw JSON object. -// It returns ErrNoBody if there was no content. 
-func GetJSON(resp *http.Response) (map[string]interface{}, error) { - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - if err != nil { - return nil, err - } - if len(body) == 0 { - return nil, ErrNoBody - } - // put the body back so it's available to others - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - // unmarshall the body to get the value - var jsonBody map[string]interface{} - if err = json.Unmarshal(body, &jsonBody); err != nil { - return nil, err - } - return jsonBody, nil -} - -const HeaderRetryAfter = "Retry-After" - -// RetryAfter returns non-zero if the response contains a Retry-After header value. -func RetryAfter(resp *http.Response) time.Duration { - if resp == nil { - return 0 - } - ra := resp.Header.Get(HeaderRetryAfter) - if ra == "" { - return 0 - } - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - return time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - return time.Until(t) - } - return 0 -} - -// HasStatusCode returns true if the Response's status code is one of the specified values. -func HasStatusCode(resp *http.Response, statusCodes ...int) bool { - if resp == nil { - return false - } - for _, sc := range statusCodes { - if resp.StatusCode == sc { - return true - } - } - return false -} - -const defaultScope = "/.default" - -// EndpointToScope converts the provided URL endpoint to its default scope. -func EndpointToScope(endpoint string) string { - if endpoint[len(endpoint)-1] != '/' { - endpoint += "/" - } - return endpoint + defaultScope -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md new file mode 100644 index 000000000..1b1a4b45d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md @@ -0,0 +1,76 @@ +# Guide to migrate from `azure-storage-blob-go` to `azblob` + +This guide is intended to assist in the migration from the `azure-storage-blob-go` module, or previous betas of `azblob`, to the latest releases of the `azblob` module. + +## Simplified API surface area + +The redesign of the `azblob` module separates clients into various sub-packages. +In previous versions, the public surface area was "flat", so all clients and supporting types were in the `azblob` package. +This made it difficult to navigate the public surface area. + +## Clients + +In `azure-storage-blob-go` a client constructor always requires a `url.URL` and `Pipeline` parameters. + +In `azblob` a client constructor always requires a `string` URL, any specified credential type, and a `*ClientOptions` for optional values. You pass `nil` to accept default options. + +```go +// new code +client, err := azblob.NewClient("", cred, nil) +``` + +## Authentication + +In `azure-storage-blob-go` you created a `Pipeline` with the required credential type. This pipeline was then passed to the client constructor. + +In `azblob`, you pass the required credential directly to the client constructor. + +```go +// new code. cred is an AAD token credential created from the azidentity module +client, err := azblob.NewClient("", cred, nil) +``` + +The `azure-storage-blob-go` module provided limited support for OAuth token authentication via `NewTokenCredential`. 
+This has been replaced by Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme).
+
+Authentication with a shared key via `NewSharedKeyCredential` remains unchanged.
+
+In `azure-storage-blob-go` you created a `Pipeline` with `NewAnonymousCredential` to support anonymous or SAS authentication.
+
+In `azblob` you use the constructor `NewClientWithNoCredential()` instead.
+
+```go
+// new code
+client, err := azblob.NewClientWithNoCredential("", nil)
+```
+
+## Listing blobs/containers
+
+In `azure-storage-blob-go` you explicitly created a `Marker` type that was used to page over results ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-package)).
+
+In `azblob`, operations that return paginated values return a `*runtime.Pager[T]`.
+
+```go
+// new code
+pager := client.NewListBlobsFlatPager("my-container", nil)
+for pager.More() {
+	page, err := pager.NextPage(context.TODO())
+	// process results
+}
+```
+
+## Configuring the HTTP pipeline
+
+In `azure-storage-blob-go` you explicitly created an HTTP pipeline with configuration before creating a client.
+This pipeline instance was then passed as an argument to the client constructor ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-NewPipeline)).
+
+In `azblob` an HTTP pipeline is created during client construction. The pipeline is configured through the `azcore.ClientOptions` type.
+
+```go
+// new code
+client, err := azblob.NewClient(account, cred, &azblob.ClientOptions{
+	ClientOptions: azcore.ClientOptions{
+		// configure HTTP pipeline options here
+	},
+})
+```
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go
new file mode 100644
index 000000000..099f48d17
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go
@@ -0,0 +1,70 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azblob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+)
+
+// CreateContainerOptions contains the optional parameters for the ContainerClient.Create method.
+type CreateContainerOptions = service.CreateContainerOptions
+
+// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method.
+type DeleteContainerOptions = service.DeleteContainerOptions
+
+// DeleteBlobOptions contains the optional parameters for the Client.Delete method.
+type DeleteBlobOptions = blob.DeleteOptions
+
+// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method.
+type DownloadStreamOptions = blob.DownloadStreamOptions
+
+// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method.
+type ListBlobsFlatOptions = container.ListBlobsFlatOptions + +// ListBlobsInclude indicates what additional information the service should return with each blob. +type ListBlobsInclude = container.ListBlobsInclude + +// ListContainersOptions contains the optional parameters for the container.Client.ListContainers operation +type ListContainersOptions = service.ListContainersOptions + +// UploadBufferOptions provides set of configurations for UploadBuffer operation +type UploadBufferOptions = blockblob.UploadBufferOptions + +// UploadFileOptions provides set of configurations for UploadFile operation +type UploadFileOptions = blockblob.UploadFileOptions + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions = blockblob.UploadStreamOptions + +// DownloadBufferOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadBufferOptions = blob.DownloadBufferOptions + +// DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadFileOptions = blob.DownloadFileOptions + +// CpkInfo contains a group of parameters for client provided encryption key. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CpkScopeInfo = generated.ContainerCpkScopeInfo + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// ListContainersInclude indicates what additional information the service should return with each container. +type ListContainersInclude = service.ListContainersInclude + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy = blob.ObjectReplicationPolicy + +// RetryReaderOptions contains properties which can help to decide when to do retry. +type RetryReaderOptions = blob.RetryReaderOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go new file mode 100644 index 000000000..e210a76e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -0,0 +1,403 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "context" + "io" + "net/http" + "net/url" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a client to an Azure Storage page blob; +type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://<account>.blob.core.windows.net/container/blob.txt
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a blob or with a shared access signature (SAS) token.
+// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
+// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - containerName - the name of the container within the storage account
+// - blobName - the name of the blob within the container
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (pb *Client) generated() *generated.PageBlobClient {
+	_, pageBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
+	return pageBlob
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (pb *Client) URL() string {
+	return pb.generated().Endpoint()
+}
+
+// BlobClient returns the embedded blob client for this page blob client.
+func (pb *Client) BlobClient() *blob.Client {
+	innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
+	return (*blob.Client)(innerBlob)
+}
+
+func (pb *Client) sharedKey() *blob.SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
+}
+
+// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot, returning a URL to the base blob.
+func (pb *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := blob.ParseURL(pb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
+}
+
+// WithVersionID creates a new Client object identical to the source but with the specified version ID.
+// Pass "" to remove the version, returning a URL to the base blob.
+func (pb *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := blob.ParseURL(pb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
+}
+
+// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (CreateResponse, error) {
+	createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()
+
+	resp, err := pb.generated().Create(ctx, 0, size, createOptions, HTTPHeaders,
+		leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+	return resp, err
+}
+
+// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
+// This method returns an error if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
+func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (UploadPagesResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+
+	if err != nil {
+		return UploadPagesResponse{}, err
+	}
+
+	uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
+
+	resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
+		cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
+// The sourceOffset specifies the start offset of source data to copy from.
+// The destOffset specifies the start offset in the page blob at which data will be written.
+// The count must be a multiple of 512 bytes.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
+func (pb *Client) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64,
+	o *UploadPagesFromURLOptions) (UploadPagesFromURLResponse, error) {
+
+	uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions,
+		modifiedAccessConditions, sourceModifiedAccessConditions := o.format()
+
+	resp, err := pb.generated().UploadPagesFromURL(ctx, source, shared.RangeToString(sourceOffset, count), 0,
+		shared.RangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions,
+		sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+
+	return resp, err
+}
+
+// ClearPages frees the specified pages from the page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
+func (pb *Client) ClearPages(ctx context.Context, rnge blob.HTTPRange, options *ClearPagesOptions) (ClearPagesResponse, error) {
+	clearOptions := &generated.PageBlobClientClearPagesOptions{
+		Range: exported.FormatHTTPRange(rnge),
+	}
+
+	leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
+
+	resp, err := pb.generated().ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo,
+		cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// NewGetPageRangesPager returns the list of valid page ranges for a page blob or snapshot of a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
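Tying the Create/UploadPages methods above together: page blob writes must be aligned to 512-byte pages. A hedged usage sketch against the API as defined in this patch; `writeFirstPage` and `client` are illustrative names:

```go
// Assumed imports: bytes, context,
// github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming,
// github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob,
// github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob.
func writeFirstPage(ctx context.Context, client *pageblob.Client) error {
	// Allocate a one-page blob, then upload one aligned page of zeros.
	if _, err := client.Create(ctx, pageblob.PageBytes, nil); err != nil {
		return err
	}
	page := streaming.NopCloser(bytes.NewReader(make([]byte, pageblob.PageBytes)))
	_, err := client.UploadPages(ctx, page, &pageblob.UploadPagesOptions{
		Range: blob.HTTPRange{Offset: 0, Count: pageblob.PageBytes},
	})
	return err
}
```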
+func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[GetPageRangesResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesResponse]{ + More: func(page GetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesResponse) (GetPageRangesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesResponse{}, err + } + resp, err := pb.generated().Pipeline().Do(req) + if err != nil { + return GetPageRangesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesHandleResponse(resp) + }, + }) +} + +// NewGetPageRangesDiffPager gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtime.Pager[GetPageRangesDiffResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesDiffResponse]{ + More: func(page GetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesDiffResponse) (GetPageRangesDiffResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesDiffResponse{}, err + } + resp, err := pb.generated().Pipeline().Do(req) + if err != nil { + return GetPageRangesDiffResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesDiffResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) { + resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() + + resp, err := pb.generated().Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return resp, err +} + +// UpdateSequenceNumber sets the page blob's sequence number. 
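Both pagers above follow the standard `runtime.Pager` contract: `More` reports whether another page exists and `NextPage` fetches it, threading `NextMarker` through automatically. A sketch of consuming the page-range pager, assuming `client` is a `*pageblob.Client` and that the response embeds the generated `PageList`:

```go
// Assumed imports: context, fmt,
// github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob.
func listRanges(ctx context.Context, client *pageblob.Client) error {
	pager := client.NewGetPageRangesPager(&pageblob.GetPageRangesOptions{})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, r := range page.PageList.PageRange {
			fmt.Printf("valid range: %d-%d\n", *r.Start, *r.End)
		}
	}
	return nil
}
```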
+func (pb *Client) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberOptions) (UpdateSequenceNumberResponse, error) { + actionType, updateOptions, lac, mac := options.format() + resp, err := pb.generated().UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) + + return resp, err +} + +// StartCopyIncremental begins an operation to start an incremental copy from one-page blob's snapshot to this page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. +// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and +// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. +func (pb *Client) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *CopyIncrementalOptions) (CopyIncrementalResponse, error) { + copySourceURL, err := url.Parse(copySource) + if err != nil { + return CopyIncrementalResponse{}, err + } + + queryParams := copySourceURL.Query() + queryParams.Set("snapshot", prevSnapshot) + copySourceURL.RawQuery = queryParams.Encode() + + pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format() + resp, err := pb.generated().CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions) + + return resp, err +} + +// Redeclared APIs + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (pb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return pb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return pb.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (pb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return pb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return pb.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return pb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (pb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return pb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (pb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return pb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (pb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return pb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (pb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return pb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (pb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return pb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return pb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
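Since DownloadStream is the most common of these redeclared read paths, a brief hedged sketch of draining it; `readAll` is an illustrative name and nil options accept the defaults:

```go
// Assumed imports: context, io,
// github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob.
func readAll(ctx context.Context, client *pageblob.Client) ([]byte, error) {
	resp, err := client.DownloadStream(ctx, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close() // the response body is a stream; always close it
	return io.ReadAll(resp.Body)
}
```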
+func (pb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
+	return pb.BlobClient().DownloadStream(ctx, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (pb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
+	return pb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match.
+func (pb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
+	return pb.BlobClient().DownloadFile(ctx, file, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go
new file mode 100644
index 000000000..646587cec
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go
@@ -0,0 +1,65 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package pageblob
+
+import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+
+const (
+	// PageBytes indicates the number of bytes in a page (512).
+	PageBytes = 512
+)
+
+// CopyStatusType defines values for CopyStatusType
+type CopyStatusType = generated.CopyStatusType
+
+const (
+	CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending
+	CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess
+	CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted
+	CopyStatusTypeFailed  CopyStatusType = generated.CopyStatusTypeFailed
+)
+
+// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
+func PossibleCopyStatusTypeValues() []CopyStatusType {
+	return generated.PossibleCopyStatusTypeValues()
+}
+
+// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier
+type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier
+
+const (
+	PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP10
+	PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP15
+	PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP20
+	PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP30
+	PremiumPageBlobAccessTierP4  PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP4
+	PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP40
+	PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP50
+	PremiumPageBlobAccessTierP6  PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP6
+	PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP60
+	PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP70
+	PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP80
+)
+
+// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type.
+func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return generated.PossiblePremiumPageBlobAccessTierValues() +} + +// SequenceNumberActionType defines values for SequenceNumberActionType +type SequenceNumberActionType = generated.SequenceNumberActionType + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = generated.SequenceNumberActionTypeMax + SequenceNumberActionTypeUpdate SequenceNumberActionType = generated.SequenceNumberActionTypeUpdate + SequenceNumberActionTypeIncrement SequenceNumberActionType = generated.SequenceNumberActionTypeIncrement +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return generated.PossibleSequenceNumberActionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go similarity index 51% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index 2be275873..c1b1194ff 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -2,126 +2,123 @@ // +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. -package azblob +package pageblob import ( - "strconv" "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) -// --------------------------------------------------------------------------------------------------------------------- +// Type Declarations --------------------------------------------------------------------- -func rangeToString(offset, count int64) string { - return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) -} +// PageList - the list of pages +type PageList = generated.PageList -// --------------------------------------------------------------------------------------------------------------------- +// ClearRange defines a range of pages. +type ClearRange = generated.ClearRange + +// PageRange defines a range of pages. +type PageRange = generated.PageRange + +// SequenceNumberAccessConditions contains a group of parameters for the Client.UploadPages method. +type SequenceNumberAccessConditions = generated.SequenceNumberAccessConditions -// PageBlobCreateOptions provides set of configurations for CreatePageBlob operation -type PageBlobCreateOptions struct { +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 + SequenceNumber *int64 + // Optional. 
Used to set blob tags in various blob operations. - BlobTagsMap map[string]string + Tags map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. Metadata map[string]string + // Optional. Indicates the tier to be set on the page blob. Tier *PremiumPageBlobAccessTier - HTTPHeaders *BlobHTTPHeaders + HTTPHeaders *blob.HTTPHeaders - CpkInfo *CpkInfo + CpkInfo *blob.CpkInfo - CpkScopeInfo *CpkScopeInfo + CpkScopeInfo *blob.CpkScopeInfo - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions // Specifies the date time when the blobs immutability policy is set to expire. ImmutabilityPolicyExpiry *time.Time // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + ImmutabilityPolicyMode *blob.ImmutabilityPolicyMode // Specified if a legal hold should be set on the blob. LegalHold *bool } -func (o *PageBlobCreateOptions) format() (*pageBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { +func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } - options := &pageBlobClientCreateOptions{ - BlobSequenceNumber: o.BlobSequenceNumber, - BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + options := &generated.PageBlobClientCreateOptions{ + BlobSequenceNumber: o.SequenceNumber, + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), Metadata: o.Metadata, Tier: o.Tier, } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions } -// PageBlobCreateResponse contains the response from method PageBlobClient.Create. -type PageBlobCreateResponse struct { - pageBlobClientCreateResponse -} - -func toPageBlobCreateResponse(resp pageBlobClientCreateResponse) PageBlobCreateResponse { - return PageBlobCreateResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobUploadPagesOptions provides set of configurations for UploadPages operation -type PageBlobUploadPagesOptions struct { - // Specify the transactional crc64 for the body, to be validated by the service. - PageRange *HttpRange +// UploadPagesOptions contains the optional parameters for the Client.UploadPages method. +type UploadPagesOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + TransactionalContentCRC64 []byte // Specify the transactional md5 for the body, to be validated by the service. 
TransactionalContentMD5 []byte - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobUploadPagesOptions) format() (*pageBlobClientUploadPagesOptions, *LeaseAccessConditions, - *CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { +func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptions, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } - options := &pageBlobClientUploadPagesOptions{ + options := &generated.PageBlobClientUploadPagesOptions{ TransactionalContentCRC64: o.TransactionalContentCRC64, TransactionalContentMD5: o.TransactionalContentMD5, + Range: exported.FormatHTTPRange(o.Range), } - if o.PageRange != nil { - options.Range = o.PageRange.format() - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } -// PageBlobUploadPagesResponse contains the response from method PageBlobClient.UploadPages. -type PageBlobUploadPagesResponse struct { - pageBlobClientUploadPagesResponse -} - -func toPageBlobUploadPagesResponse(resp pageBlobClientUploadPagesResponse) PageBlobUploadPagesResponse { - return PageBlobUploadPagesResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobUploadPagesFromURLOptions provides set of configurations for UploadPagesFromURL operation -type PageBlobUploadPagesFromURLOptions struct { +// UploadPagesFromURLOptions contains the optional parameters for the Client.UploadPagesFromURL method. +type UploadPagesFromURLOptions struct { // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string // Specify the md5 calculated for the range of bytes that must be read from the copy source. @@ -129,75 +126,57 @@ type PageBlobUploadPagesFromURLOptions struct { // Specify the crc64 calculated for the range of bytes that must be read from the copy source. 
SourceContentCRC64 []byte - CpkInfo *CpkInfo + CpkInfo *blob.CpkInfo - CpkScopeInfo *CpkScopeInfo + CpkScopeInfo *blob.CpkScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions - SourceModifiedAccessConditions *SourceModifiedAccessConditions + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobUploadPagesFromURLOptions) format() (*pageBlobClientUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo, - *LeaseAccessConditions, *SequenceNumberAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { +func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, + *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil, nil } - options := &pageBlobClientUploadPagesFromURLOptions{ + options := &generated.PageBlobClientUploadPagesFromURLOptions{ SourceContentMD5: o.SourceContentMD5, SourceContentcrc64: o.SourceContentCRC64, CopySourceAuthorization: o.CopySourceAuthorization, } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions } -// PageBlobUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL -type PageBlobUploadPagesFromURLResponse struct { - pageBlobClientUploadPagesFromURLResponse -} - -func toPageBlobUploadPagesFromURLResponse(resp pageBlobClientUploadPagesFromURLResponse) PageBlobUploadPagesFromURLResponse { - return PageBlobUploadPagesFromURLResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobClearPagesOptions provides set of configurations for PageBlobClient.ClearPages operation -type PageBlobClearPagesOptions struct { - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo +// ClearPagesOptions contains the optional parameters for the Client.ClearPages operation +type ClearPagesOptions struct { + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobClearPagesOptions) format() (*LeaseAccessConditions, *CpkInfo, - *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { +func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } -// PageBlobClearPagesResponse contains the response from method PageBlobClient.ClearPages -type PageBlobClearPagesResponse struct { - pageBlobClientClearPagesResponse 
-} - -func toPageBlobClearPagesResponse(resp pageBlobClientClearPagesResponse) PageBlobClearPagesResponse { - return PageBlobClearPagesResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobGetPageRangesOptions provides set of configurations for GetPageRanges operation -type PageBlobGetPageRangesOptions struct { +// GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. +type GetPageRangesOptions struct { Marker *string // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the @@ -215,43 +194,34 @@ type PageBlobGetPageRangesOptions struct { // specified by prevsnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string - // Optional, you can specify whether a particular range of the blob is read - PageRange *HttpRange + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more // information on working with blob snapshots, see Creating a Snapshot of a Blob. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] Snapshot *string - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobGetPageRangesOptions) format() (*pageBlobClientGetPageRangesOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &pageBlobClientGetPageRangesOptions{ + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.PageBlobClientGetPageRangesOptions{ Marker: o.Marker, Maxresults: o.MaxResults, - Range: o.PageRange.format(), + Range: exported.FormatHTTPRange(o.Range), Snapshot: o.Snapshot, }, leaseAccessConditions, modifiedAccessConditions } -// PageBlobGetPageRangesPager provides operations for iterating over paged responses -type PageBlobGetPageRangesPager struct { - *pageBlobClientGetPageRangesPager -} - -func toPageBlobGetPageRangesPager(resp *pageBlobClientGetPageRangesPager) *PageBlobGetPageRangesPager { - return &PageBlobGetPageRangesPager{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobGetPageRangesDiffOptions provides set of configurations for PageBlobClient.GetPageRangesDiff operation -type PageBlobGetPageRangesDiffOptions struct { +// GetPageRangesDiffOptions contains the optional parameters for the Client.NewGetPageRangesDiffPager method. +type GetPageRangesDiffOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing // operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used @@ -274,115 +244,90 @@ type PageBlobGetPageRangesDiffOptions struct { // specified by prevsnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string - // Optional, you can specify whether a particular range of the blob is read - PageRange *HttpRange + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more // information on working with blob snapshots, see Creating a Snapshot of a Blob. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] Snapshot *string - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobGetPageRangesDiffOptions) format() (*pageBlobClientGetPageRangesDiffOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRangesDiffOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &pageBlobClientGetPageRangesDiffOptions{ + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.PageBlobClientGetPageRangesDiffOptions{ Marker: o.Marker, Maxresults: o.MaxResults, PrevSnapshotURL: o.PrevSnapshotURL, Prevsnapshot: o.PrevSnapshot, - Range: o.PageRange.format(), + Range: exported.FormatHTTPRange(o.Range), Snapshot: o.Snapshot, }, leaseAccessConditions, modifiedAccessConditions } -// PageBlobGetPageRangesDiffPager provides operations for iterating over paged responses -type PageBlobGetPageRangesDiffPager struct { - *pageBlobClientGetPageRangesDiffPager -} - -func toPageBlobGetPageRangesDiffPager(resp *pageBlobClientGetPageRangesDiffPager) *PageBlobGetPageRangesDiffPager { - return &PageBlobGetPageRangesDiffPager{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobResizeOptions provides set of configurations for PageBlobClient.Resize operation -type PageBlobResizeOptions struct { - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - BlobAccessConditions *BlobAccessConditions +// ResizeOptions contains the optional parameters for the Client.Resize method. 
+type ResizeOptions struct { + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions } -func (o *PageBlobResizeOptions) format() (*pageBlobClientResizeOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { +func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions } -// PageBlobResizeResponse contains the response from method PageBlobClient.Resize -type PageBlobResizeResponse struct { - pageBlobClientResizeResponse -} - -func toPageBlobResizeResponse(resp pageBlobClientResizeResponse) PageBlobResizeResponse { - return PageBlobResizeResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobUpdateSequenceNumberOptions provides set of configurations for PageBlobClient.UpdateSequenceNumber operation -type PageBlobUpdateSequenceNumberOptions struct { +// UpdateSequenceNumberOptions contains the optional parameters for the Client.UpdateSequenceNumber method. +type UpdateSequenceNumberOptions struct { ActionType *SequenceNumberActionType - BlobSequenceNumber *int64 + SequenceNumber *int64 - BlobAccessConditions *BlobAccessConditions + AccessConditions *blob.AccessConditions } -func (o *PageBlobUpdateSequenceNumberOptions) format() (*SequenceNumberActionType, *pageBlobClientUpdateSequenceNumberOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *UpdateSequenceNumberOptions) format() (*generated.SequenceNumberActionType, *generated.PageBlobClientUpdateSequenceNumberOptions, + *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } - options := &pageBlobClientUpdateSequenceNumberOptions{ - BlobSequenceNumber: o.BlobSequenceNumber, + options := &generated.PageBlobClientUpdateSequenceNumberOptions{ + BlobSequenceNumber: o.SequenceNumber, } if *o.ActionType == SequenceNumberActionTypeIncrement { options.BlobSequenceNumber = nil } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions } -// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber -type PageBlobUpdateSequenceNumberResponse struct { - pageBlobClientUpdateSequenceNumberResponse -} - -func toPageBlobUpdateSequenceNumberResponse(resp pageBlobClientUpdateSequenceNumberResponse) PageBlobUpdateSequenceNumberResponse { - return PageBlobUpdateSequenceNumberResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- -// PageBlobCopyIncrementalOptions provides set of configurations for PageBlobClient.StartCopyIncremental operation -type PageBlobCopyIncrementalOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions +// CopyIncrementalOptions contains the optional parameters for the 
Client.StartCopyIncremental method. +type CopyIncrementalOptions struct { + ModifiedAccessConditions *blob.ModifiedAccessConditions } -func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementalOptions, *ModifiedAccessConditions) { +func (o *CopyIncrementalOptions) format() (*generated.PageBlobClientCopyIncrementalOptions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil } @@ -390,13 +335,4 @@ func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementa return nil, o.ModifiedAccessConditions } -// PageBlobCopyIncrementalResponse contains the response from method PageBlobClient.StartCopyIncremental -type PageBlobCopyIncrementalResponse struct { - pageBlobClientCopyIncrementalResponse -} - -func toPageBlobCopyIncrementalResponse(resp pageBlobClientCopyIncrementalResponse) PageBlobCopyIncrementalResponse { - return PageBlobCopyIncrementalResponse{resp} -} - // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go new file mode 100644 index 000000000..876efbab1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.PageBlobClientCreateResponse + +// UploadPagesResponse contains the response from method Client.UploadPages. +type UploadPagesResponse = generated.PageBlobClientUploadPagesResponse + +// UploadPagesFromURLResponse contains the response from method Client.UploadPagesFromURL. +type UploadPagesFromURLResponse = generated.PageBlobClientUploadPagesFromURLResponse + +// ClearPagesResponse contains the response from method Client.ClearPages. +type ClearPagesResponse = generated.PageBlobClientClearPagesResponse + +// GetPageRangesResponse contains the response from method Client.NewGetPageRangesPager. +type GetPageRangesResponse = generated.PageBlobClientGetPageRangesResponse + +// GetPageRangesDiffResponse contains the response from method Client.NewGetPageRangesDiffPager. +type GetPageRangesDiffResponse = generated.PageBlobClientGetPageRangesDiffResponse + +// ResizeResponse contains the response from method Client.Resize. +type ResizeResponse = generated.PageBlobClientResizeResponse + +// UpdateSequenceNumberResponse contains the response from method Client.UpdateSequenceNumber. +type UpdateSequenceNumberResponse = generated.PageBlobClientUpdateSequenceNumberResponse + +// CopyIncrementalResponse contains the response from method Client.StartCopyIncremental. 
+type CopyIncrementalResponse = generated.PageBlobClientCopyIncrementalResponse
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go
new file mode 100644
index 000000000..86b05d098
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go
@@ -0,0 +1,51 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azblob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+)
+
+// CreateContainerResponse contains the response from method container.Client.Create.
+type CreateContainerResponse = service.CreateContainerResponse
+
+// DeleteContainerResponse contains the response from method container.Client.Delete.
+type DeleteContainerResponse = service.DeleteContainerResponse
+
+// DeleteBlobResponse contains the response from method blob.Client.Delete.
+type DeleteBlobResponse = blob.DeleteResponse
+
+// UploadResponse contains the response from method blockblob.Client.CommitBlockList.
+type UploadResponse = blockblob.CommitBlockListResponse
+
+// DownloadStreamResponse wraps the AutoRest-generated BlobDownloadResponse and carries the information needed to retry the download.
+type DownloadStreamResponse = blob.DownloadStreamResponse
+
+// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment.
+type ListBlobsFlatResponse = container.ListBlobsFlatResponse
+
+// ListContainersResponse contains the response from method service.Client.ListContainersSegment.
+type ListContainersResponse = service.ListContainersResponse
+
+// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
+type UploadBufferResponse = blockblob.UploadBufferResponse
+
+// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
+type UploadFileResponse = blockblob.UploadFileResponse
+
+// UploadStreamResponse contains the response from method Client.CommitBlockList.
+type UploadStreamResponse = blockblob.CommitBlockListResponse
+
+// ListContainersSegmentResponse - An enumeration of containers
+type ListContainersSegmentResponse = generated.ListContainersSegmentResponse
+
+// ListBlobsFlatSegmentResponse - An enumeration of blobs
+type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go
new file mode 100644
index 000000000..fa76ed95c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go
@@ -0,0 +1,316 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package sas
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
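+// It is typically constructed with azblob.NewSharedKeyCredential rather than built by hand.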
+type SharedKeyCredential = exported.SharedKeyCredential
+
+// UserDelegationCredential contains an account's name and its user delegation key.
+type UserDelegationCredential = exported.UserDelegationCredential
+
+// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSignatureValues struct {
+	Version       string    `param:"sv"`  // If not specified, this defaults to Version
+	Protocol      Protocol  `param:"spr"` // See the Protocol* constants
+	StartTime     time.Time `param:"st"`  // Not specified if IsZero
+	ExpiryTime    time.Time `param:"se"`  // Not specified if IsZero
+	Permissions   string    `param:"sp"`  // Create by initializing an AccountPermissions and then call String()
+	IPRange       IPRange   `param:"sip"`
+	Services      string    `param:"ss"`  // Create by initializing AccountServices and then call String()
+	ResourceTypes string    `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
+}
+
+// SignWithSharedKey uses an account's shared key credential to sign these signature values to produce
+// the proper SAS query parameters.
+func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = Version
+	}
+	perms, err := parseAccountPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That is right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// SignWithUserDelegation uses an account's UserDelegationCredential to sign these signature values to produce the proper SAS query parameters.
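+//
+// A minimal sketch (assumes udc was obtained earlier, e.g. from the service
+// client's GetUserDelegationCredential; the times and permissions below are
+// placeholders):
+//
+//	values := AccountSignatureValues{
+//		Protocol:      ProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(time.Hour),
+//		Permissions:   (&AccountPermissions{Read: true, List: true}).String(),
+//		Services:      (&AccountServices{Blob: true}).String(),
+//		ResourceTypes: (&AccountResourceTypes{Container: true, Object: true}).String(),
+//	}
+//	qp, err := values.SignWithUserDelegation(udc)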
+func (v AccountSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = Version
+	}
+
+	perms, err := parseAccountPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		exported.GetAccountName(userDelegationCredential),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That is right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	udk := exported.GetUDKParams(userDelegationCredential)
+
+	//User delegation SAS specific parameters
+	p.signedOID = *udk.SignedOID
+	p.signedTID = *udk.SignedTID
+	p.signedStart = *udk.SignedStart
+	p.signedExpiry = *udk.SignedExpiry
+	p.signedService = *udk.SignedService
+	p.signedVersion = *udk.SignedVersion
+
+	return p, nil
+}
+
+// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues's Permissions field.
+type AccountPermissions struct {
+	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSignatureValues's Permissions field.
+func (p *AccountPermissions) String() string {
+	var buffer bytes.Buffer
+	if p.Read {
+		buffer.WriteRune('r')
+	}
+	if p.Write {
+		buffer.WriteRune('w')
+	}
+	if p.Delete {
+		buffer.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		buffer.WriteRune('x')
+	}
+	if p.List {
+		buffer.WriteRune('l')
+	}
+	if p.Add {
+		buffer.WriteRune('a')
+	}
+	if p.Create {
+		buffer.WriteRune('c')
+	}
+	if p.Update {
+		buffer.WriteRune('u')
+	}
+	if p.Process {
+		buffer.WriteRune('p')
+	}
+	if p.Tag {
+		buffer.WriteRune('t')
+	}
+	if p.FilterByTags {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// parseAccountPermissions initializes the AccountPermissions' fields from a string.
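+// Accepted characters are r, w, d, x, l, a, c, u, p, t and f, mirroring
+// AccountPermissions.String; any other character produces an error.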
+func parseAccountPermissions(s string) (AccountPermissions, error) {
+	p := AccountPermissions{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		case 'x':
+			p.DeletePreviousVersion = true
+		case 't':
+			p.Tag = true
+		case 'f':
+			p.FilterByTags = true
+		default:
+			return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r)
+		}
+	}
+	return p, nil
+}
+
+// AccountServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues's Services field.
+type AccountServices struct {
+	Blob, Queue, File bool
+}
+
+// String produces the SAS services string for an Azure Storage account.
+// Call this method to set AccountSignatureValues's Services field.
+func (s *AccountServices) String() string {
+	var buffer bytes.Buffer
+	if s.Blob {
+		buffer.WriteRune('b')
+	}
+	if s.Queue {
+		buffer.WriteRune('q')
+	}
+	if s.File {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASServices' fields from a string.
+/*func parseAccountServices(str string) (AccountServices, error) {
+	s := AccountServices{} // Clear out the flags
+	for _, r := range str {
+		switch r {
+		case 'b':
+			s.Blob = true
+		case 'q':
+			s.Queue = true
+		case 'f':
+			s.File = true
+		default:
+			return AccountServices{}, fmt.Errorf("invalid service character: '%v'", r)
+		}
+	}
+	return s, nil
+}*/
+
+// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues's ResourceTypes field.
+type AccountResourceTypes struct {
+	Service, Container, Object bool
+}
+
+// String produces the SAS resource types string for an Azure Storage account.
+// Call this method to set AccountSignatureValues's ResourceTypes field.
+func (rt *AccountResourceTypes) String() string {
+	var buffer bytes.Buffer
+	if rt.Service {
+		buffer.WriteRune('s')
+	}
+	if rt.Container {
+		buffer.WriteRune('c')
+	}
+	if rt.Object {
+		buffer.WriteRune('o')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountResourceTypes's fields from a string.
+/*func parseAccountResourceTypes(s string) (AccountResourceTypes, error) {
+	rt := AccountResourceTypes{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 's':
+			rt.Service = true
+		case 'c':
+			rt.Container = true
+		case 'o':
+			rt.Object = true
+		default:
+			return AccountResourceTypes{}, fmt.Errorf("invalid resource type: '%v'", r)
+		}
+	}
+	return rt, nil
+}*/
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
new file mode 100644
index 000000000..1a9c8c12d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
@@ -0,0 +1,440 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package sas
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const (
+	TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601
+)
+
+var (
+	// Version is the default version encoded in the SAS token.
+	Version = "2020-02-10"
+)
+
+// timeFormats are the ISO 8601 time formats accepted when parsing SAS times.
+// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"}
+
+// Protocol indicates the protocol scheme(s) permitted for a SAS (https, or https and http).
+type Protocol string
+
+const (
+	// ProtocolHTTPS can be specified for a SAS protocol
+	ProtocolHTTPS Protocol = "https"
+
+	// ProtocolHTTPSandHTTP can be specified for a SAS protocol
+	ProtocolHTTPSandHTTP Protocol = "https,http"
+)
+
+// formatTimesForSigning converts the start, expiry, and snapshot times to strings
+// suitable for signing. Each result is "" if the corresponding value.IsZero().
+func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(exported.SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// formatTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatTimeWithDefaultFormat(t *time.Time) string {
+	return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatTime formats time with the given format, using ISO 8601 "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string against each accepted format.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range timeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("fail to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewQueryParameters(). You encode the
+// components back into a query string by calling Encode().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSignatureValues type.
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + preauthorizedAgentObjectID string `param:"saoid"` + agentObjectID string `param:"suoid"` + correlationID string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// PreauthorizedAgentObjectID returns preauthorizedAgentObjectID +func (p *QueryParameters) PreauthorizedAgentObjectID() string { + return p.preauthorizedAgentObjectID +} + +// AgentObjectID returns agentObjectID +func (p *QueryParameters) AgentObjectID() string { + return p.agentObjectID +} + +// SignedCorrelationID returns signedCorrelationID +func (p *QueryParameters) SignedCorrelationID() string { + return p.correlationID +} + +// SignedOID returns signedOID +func (p *QueryParameters) SignedOID() string { + return p.signedOID +} + +// SignedTID returns signedTID +func (p *QueryParameters) SignedTID() string { + return p.signedTID +} + +// SignedStart returns signedStart +func (p *QueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry +func (p *QueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService +func (p *QueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion +func (p *QueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime +func (p *QueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services +func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange +func (p *QueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier +func (p *QueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource +func (p *QueryParameters) Resource() string { + return p.resource +} + 
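+// A small sketch of inspecting a parsed token (assumes qp was produced by
+// NewQueryParameters or by one of the Sign* methods; the check itself is
+// illustrative, not part of the SDK):
+//
+//	if !qp.ExpiryTime().IsZero() && qp.ExpiryTime().Before(time.Now().UTC()) {
+//		// the SAS has expired; re-sign before handing out the URL
+//	}
+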
+// Permissions returns permissions
+func (p *QueryParameters) Permissions() string {
+	return p.permissions
+}
+
+// Signature returns signature
+func (p *QueryParameters) Signature() string {
+	return p.signature
+}
+
+// CacheControl returns cacheControl
+func (p *QueryParameters) CacheControl() string {
+	return p.cacheControl
+}
+
+// ContentDisposition returns contentDisposition
+func (p *QueryParameters) ContentDisposition() string {
+	return p.contentDisposition
+}
+
+// ContentEncoding returns contentEncoding
+func (p *QueryParameters) ContentEncoding() string {
+	return p.contentEncoding
+}
+
+// ContentLanguage returns contentLanguage
+func (p *QueryParameters) ContentLanguage() string {
+	return p.contentLanguage
+}
+
+// ContentType returns contentType
+func (p *QueryParameters) ContentType() string {
+	return p.contentType
+}
+
+// SignedDirectoryDepth returns signedDirectoryDepth
+func (p *QueryParameters) SignedDirectoryDepth() string {
+	return p.signedDirectoryDepth
+}
+
+// Encode encodes the SAS query parameters into URL encoded form sorted by key.
+func (p *QueryParameters) Encode() string {
+	v := url.Values{}
+
+	if p.version != "" {
+		v.Add("sv", p.version)
+	}
+	if p.services != "" {
+		v.Add("ss", p.services)
+	}
+	if p.resourceTypes != "" {
+		v.Add("srt", p.resourceTypes)
+	}
+	if p.protocol != "" {
+		v.Add("spr", string(p.protocol))
+	}
+	if !p.startTime.IsZero() {
+		v.Add("st", formatTime(&(p.startTime), p.stTimeFormat))
+	}
+	if !p.expiryTime.IsZero() {
+		v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat))
+	}
+	if len(p.ipRange.Start) > 0 {
+		v.Add("sip", p.ipRange.String())
+	}
+	if p.identifier != "" {
+		v.Add("si", p.identifier)
+	}
+	if p.resource != "" {
+		v.Add("sr", p.resource)
+	}
+	if p.permissions != "" {
+		v.Add("sp", p.permissions)
+	}
+	if p.signedOID != "" {
+		v.Add("skoid", p.signedOID)
+		v.Add("sktid", p.signedTID)
+		v.Add("skt", p.signedStart.Format(TimeFormat))
+		v.Add("ske", p.signedExpiry.Format(TimeFormat))
+		v.Add("sks", p.signedService)
+		v.Add("skv", p.signedVersion)
+	}
+	if p.signature != "" {
+		v.Add("sig", p.signature)
+	}
+	if p.cacheControl != "" {
+		v.Add("rscc", p.cacheControl)
+	}
+	if p.contentDisposition != "" {
+		v.Add("rscd", p.contentDisposition)
+	}
+	if p.contentEncoding != "" {
+		v.Add("rsce", p.contentEncoding)
+	}
+	if p.contentLanguage != "" {
+		v.Add("rscl", p.contentLanguage)
+	}
+	if p.contentType != "" {
+		v.Add("rsct", p.contentType)
+	}
+	if p.signedDirectoryDepth != "" {
+		v.Add("sdd", p.signedDirectoryDepth)
+	}
+	if p.preauthorizedAgentObjectID != "" {
+		v.Add("saoid", p.preauthorizedAgentObjectID)
+	}
+	if p.agentObjectID != "" {
+		v.Add("suoid", p.agentObjectID)
+	}
+	if p.correlationID != "" {
+		v.Add("scid", p.correlationID)
+	}
+
+	return v.Encode()
+}
+
+// NewQueryParameters creates and initializes a QueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
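+//
+// For example (sketch, hypothetical SAS values):
+//
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/c?sv=2020-02-10&sp=r&sig=...")
+//	qp := NewQueryParameters(u.Query(), false)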
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "snapshot": + p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.preauthorizedAgentObjectID = val + case "suoid": + p.agentObjectID = val + case "scid": + p.correlationID = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go similarity index 51% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 488baed8c..98d853d4e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -4,22 +4,24 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package azblob +package sas import ( "bytes" "fmt" "strings" "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" ) -// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. +// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
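+// Fill in the fields, then call SignWithSharedKey or SignWithUserDelegation to produce the SAS query parameters.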
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
-type BlobSASSignatureValues struct {
-	Version    string      `param:"sv"`  // If not specified, this defaults to SASVersion
-	Protocol   SASProtocol `param:"spr"` // See the SASProtocol* constants
-	StartTime  time.Time   `param:"st"`  // Not specified if IsZero
-	ExpiryTime time.Time   `param:"se"`  // Not specified if IsZero
+type BlobSignatureValues struct {
+	Version    string    `param:"sv"`  // If not specified, this defaults to Version
+	Protocol   Protocol  `param:"spr"` // See the Protocol* constants
+	StartTime  time.Time `param:"st"`  // Not specified if IsZero
+	ExpiryTime time.Time `param:"se"`  // Not specified if IsZero
 	SnapshotTime time.Time
 	Permissions  string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
 	IPRange      IPRange `param:"sip"`
@@ -45,81 +47,40 @@ func getDirectoryDepth(path string) string {
 	return fmt.Sprint(strings.Count(path, "/") + 1)
 }
 
-// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce
-// the proper SAS query parameters.
-// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential
-func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
-	resource := "c"
+// SignWithSharedKey uses an account's SharedKeyCredential to sign these signature values to produce the proper SAS query parameters.
+func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
 	if sharedKeyCredential == nil {
-		return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential")
+		return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential")
+	}
+
+	//Make sure the permission characters are in the correct order
+	perms, err := parseBlobPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
 	}
+	v.Permissions = perms.String()
 
+	resource := "c"
 	if !v.SnapshotTime.IsZero() {
 		resource = "bs"
-		//Make sure the permission characters are in the correct order
-		perms := &BlobSASPermissions{}
-		if err := perms.Parse(v.Permissions); err != nil {
-			return SASQueryParameters{}, err
-		}
-		v.Permissions = perms.String()
 	} else if v.BlobVersion != "" {
 		resource = "bv"
-		//Make sure the permission characters are in the correct order
-		perms := &BlobSASPermissions{}
-		if err := perms.Parse(v.Permissions); err != nil {
-			return SASQueryParameters{}, err
-		}
-		v.Permissions = perms.String()
 	} else if v.Directory != "" {
 		resource = "d"
 		v.BlobName = ""
-		perms := &BlobSASPermissions{}
-		if err := perms.Parse(v.Permissions); err != nil {
-			return SASQueryParameters{}, err
-		}
-		v.Permissions = perms.String()
 	} else if v.BlobName == "" {
-		// Make sure the permission characters are in the correct order
-		perms := &ContainerSASPermissions{}
-		if err := perms.Parse(v.Permissions); err != nil {
-			return SASQueryParameters{}, err
-		}
-		v.Permissions = perms.String()
+		// do nothing
 	} else {
 		resource = "b"
-		// Make sure the permission characters are in the correct order
-		perms := &BlobSASPermissions{}
-		if err := perms.Parse(v.Permissions); err != nil {
-			return SASQueryParameters{}, err
-		}
-		v.Permissions = perms.String()
 	}
+
 	if v.Version == "" {
-		v.Version = SASVersion
+		v.Version = Version
 	}
 
-	startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
+	startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
 
 	signedIdentifier := v.Identifier
 
-	//udk := sharedKeyCredential.getUDKParams()
-	//
-	//if udk != nil {
-	//	udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
-	//	//I don't like this answer to combining the functions
-	//	//But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
-	//	signedIdentifier = strings.Join([]string{
-	//		udk.SignedOid,
-	//		udk.SignedTid,
-	//		udkStart,
-	//		udkExpiry,
-	//		udk.SignedService,
-	//		udk.SignedVersion,
-	//		v.PreauthorizedAgentObjectId,
-	//		v.AgentObjectId,
-	//		v.CorrelationId,
-	//	}, "\n")
-	//}
-
 	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
 	stringToSign := strings.Join([]string{
 		v.Permissions,
@@ -139,13 +100,114 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
 		v.ContentType}, // rsct
 		"\n")
 
-	signature := ""
-	signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Container/Blob-specific SAS parameters
+		resource:                   resource,
+		identifier:                 v.Identifier,
+		cacheControl:               v.CacheControl,
+		contentDisposition:         v.ContentDisposition,
+		contentEncoding:            v.ContentEncoding,
+		contentLanguage:            v.ContentLanguage,
+		contentType:                v.ContentType,
+		snapshotTime:               v.SnapshotTime,
+		signedDirectoryDepth:       getDirectoryDepth(v.Directory),
+		preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId,
+		agentObjectID:              v.AgentObjectId,
+		correlationID:              v.CorrelationId,
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// SignWithUserDelegation uses an account's UserDelegationCredential to sign these signature values to produce the proper SAS query parameters.
+func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) {
+	if userDelegationCredential == nil {
+		return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key")
+	}
+
+	//Make sure the permission characters are in the correct order
+	perms, err := parseBlobPermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	resource := "c"
+	if !v.SnapshotTime.IsZero() {
+		resource = "bs"
+	} else if v.BlobVersion != "" {
+		resource = "bv"
+	} else if v.Directory != "" {
+		resource = "d"
+		v.BlobName = ""
+	} else if v.BlobName == "" {
+		// do nothing
+	} else {
+		resource = "b"
+	}
+
+	if v.Version == "" {
+		v.Version = Version
+	}
+	startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
+
+	udk := exported.GetUDKParams(userDelegationCredential)
+
+	udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{})
+	// signedIdentifier and the user delegation key occupy the same slot in the
+	// string-to-sign, so the key's fields are folded into signedIdentifier here.
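+	// The join below mirrors the service's expected layout for the user delegation
+	// section of the string-to-sign: oid, tid, start, expiry, service and version,
+	// followed by the optional saoid, suoid and scid values.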
+ signedIdentifier := strings.Join([]string{ + *udk.SignedOID, + *udk.SignedTID, + udkStart, + udkExpiry, + *udk.SignedService, + *udk.SignedVersion, + v.PreauthorizedAgentObjectId, + v.AgentObjectId, + v.CorrelationId, + }, "\n") + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) if err != nil { - return SASQueryParameters{}, err + return QueryParameters{}, err } - p := SASQueryParameters{ + p := QueryParameters{ // Common SAS parameters version: v.Version, protocol: v.Protocol, @@ -164,22 +226,20 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share contentType: v.ContentType, snapshotTime: v.SnapshotTime, signedDirectoryDepth: getDirectoryDepth(v.Directory), - preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, - agentObjectId: v.AgentObjectId, - correlationId: v.CorrelationId, + preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, + agentObjectID: v.AgentObjectId, + correlationID: v.CorrelationId, // Calculated SAS signature signature: signature, } - ////User delegation SAS specific parameters - //if udk != nil { - // p.signedOid = udk.SignedOid - // p.signedTid = udk.SignedTid - // p.signedStart = udk.SignedStart - // p.signedExpiry = udk.SignedExpiry - // p.signedService = udk.SignedService - // p.signedVersion = udk.SignedVersion - //} + //User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion return p, nil } @@ -197,17 +257,17 @@ func getCanonicalName(account string, containerName string, blobName string, dir return strings.Join(elements, "") } -// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. // All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob -type ContainerSASPermissions struct { +type ContainerPermissions struct { Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only } // String produces the SAS permissions string for an Azure Storage container. // Call this method to set BlobSASSignatureValues's Permissions field. -func (p ContainerSASPermissions) String() string { +func (p *ContainerPermissions) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') @@ -245,9 +305,9 @@ func (p ContainerSASPermissions) String() string { return b.String() } -// Parse initializes the ContainerSASPermissions's fields from a string. 
-func (p *ContainerSASPermissions) Parse(s string) error {
-	*p = ContainerSASPermissions{} // Clear the flags
+// parseContainerPermissions initializes the ContainerPermissions' fields from a string.
+/*func parseContainerPermissions(s string) (ContainerPermissions, error) {
+	p := ContainerPermissions{} // Clear the flags
 	for _, r := range s {
 		switch r {
 		case 'r':
@@ -273,21 +333,21 @@ func (p *ContainerSASPermissions) Parse(s string) error {
 		case 'p':
 			p.ModifyPermissions = true
 		default:
-			return fmt.Errorf("invalid permission: '%v'", r)
+			return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r)
 		}
 	}
-	return nil
-}
+	return p, nil
+}*/
 
-// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
+// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
 // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
-type BlobSASPermissions struct {
+type BlobPermissions struct {
 	Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool
 }
 
 // String produces the SAS permissions string for an Azure Storage blob.
-// Call this method to set BlobSASSignatureValues's Permissions field.
-func (p BlobSASPermissions) String() string {
+// Call this method to set BlobSignatureValues's Permissions field.
+func (p *BlobPermissions) String() string {
 	var b bytes.Buffer
 	if p.Read {
 		b.WriteRune('r')
@@ -329,8 +389,8 @@ func (p BlobSASPermissions) String() string {
 }
 
-// Parse initializes the BlobSASPermissions's fields from a string.
-func (p *BlobSASPermissions) Parse(s string) error {
-	*p = BlobSASPermissions{} // Clear the flags
+// parseBlobPermissions initializes the BlobPermissions' fields from a string.
+func parseBlobPermissions(s string) (BlobPermissions, error) {
+	p := BlobPermissions{} // Clear the flags
 	for _, r := range s {
 		switch r {
 		case 'r':
@@ -358,8 +418,8 @@ func (p *BlobSASPermissions) Parse(s string) error {
 		case 'p':
 			p.Permissions = true
 		default:
-			return fmt.Errorf("invalid permission: '%v'", r)
+			return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r)
 		}
 	}
-	return nil
+	return p, nil
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go
similarity index 68%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go
index 062587604..57fe053f0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go
@@ -2,71 +2,52 @@
 // +build go1.18
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
+// Licensed under the MIT License. See License.txt in the project root for license information.
 
-package azblob
+package sas
 
 import (
-	"net"
 	"net/url"
 	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
 )
 
 const (
-	snapshot           = "snapshot"
-	versionId          = "versionid"
-	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+	snapshot  = "snapshot"
+	versionId = "versionid"
 )
 
-// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
-// existing URL into its parts by calling NewBlobURLParts().
You construct a URL from parts by calling URL(). +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/containername" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. // NOTE: Changing any SAS-related field requires computing a new SAS signature. -type BlobURLParts struct { +type URLParts struct { Scheme string // Ex: "https://" Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" IPEndpointStyleInfo IPEndpointStyleInfo ContainerName string // "" if no container BlobName string // "" if no blob Snapshot string // "" if not a snapshot - SAS SASQueryParameters + SAS QueryParameters UnparsedParams string VersionID string // "" if not versioning enabled } -// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. -// Ex: "https://10.132.141.33/accountname/containername" -type IPEndpointStyleInfo struct { - AccountName string // "" if not using IP endpoint style -} - -// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: -// http(s)://IP(:port)/storageaccount/container/... -// As url's Host property, host could be both host or host:port -func isIPEndpointStyle(host string) bool { - if host == "" { - return false - } - if h, _, err := net.SplitHostPort(host); err == nil { - host = h - } - // For IPv6, there could be case where SplitHostPort fails for cannot finding port. - // In this case, eliminate the '[' and ']' in the URL. - // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 - if host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - return net.ParseIP(host) != nil -} - -// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other -// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. -func NewBlobURLParts(u string) (BlobURLParts, error) { +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. 
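+//
+// A rough usage sketch (hypothetical URL; the SAS token is elided):
+//
+//	parts, err := sas.ParseURL("https://myaccount.blob.core.windows.net/mycontainer/dir/file.txt?sv=...")
+//	if err != nil {
+//		// handle the malformed URL
+//	}
+//	// parts.ContainerName == "mycontainer", parts.BlobName == "dir/file.txt"
+//	parts.BlobName = "other.txt" // retarget to a sibling blob
+//	rebuilt := parts.String()    // SAS and unparsed parameters carry over
+//	_ = rebuilt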
+func ParseURL(u string) (URLParts, error) { uri, err := url.Parse(u) if err != nil { - return BlobURLParts{}, err + return URLParts{}, err } - up := BlobURLParts{ + up := URLParts{ Scheme: uri.Scheme, Host: uri.Host, } @@ -77,7 +58,7 @@ func NewBlobURLParts(u string) (BlobURLParts, error) { if path[0] == '/' { path = path[1:] // If path starts with a slash, remove it } - if isIPEndpointStyle(up.Host) { + if shared.IsIPEndpointStyle(up.Host) { if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob up.IPEndpointStyleInfo.AccountName = path path = "" // No ContainerName present in the URL so path should be empty @@ -114,27 +95,16 @@ func NewBlobURLParts(u string) (BlobURLParts, error) { delete(paramsMap, "versionId") // delete "versionId" from paramsMap } - up.SAS = newSASQueryParameters(paramsMap, true) + up.SAS = NewQueryParameters(paramsMap, true) up.UnparsedParams = paramsMap.Encode() return up, nil } -type caseInsensitiveValues url.Values // map[string][]string -func (values caseInsensitiveValues) Get(key string) ([]string, bool) { - key = strings.ToLower(key) - for k, v := range values { - if strings.ToLower(k) == key { - return v, true - } - } - return []string{}, false -} - -// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery // field contains the SAS, snapshot, and unparsed query parameters. -func (up BlobURLParts) URL() string { +func (up URLParts) String() string { path := "" - if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { path += "/" + up.IPEndpointStyleInfo.AccountName } // Concatenate container & blob names (if they exist) @@ -148,8 +118,8 @@ func (up BlobURLParts) URL() string { rawQuery := up.UnparsedParams //If no snapshot is initially provided, fill it in from the SAS query properties to help the user - if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { - up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) + if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { + up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) } // Concatenate blob version id query parameter (if it exists) @@ -182,3 +152,15 @@ func (up BlobURLParts) URL() string { } return u.String() } + +type caseInsensitiveValues url.Values // map[string][]string + +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go deleted file mode 100644 index d2e89f5b2..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-
-package azblob
-
-import (
-	"errors"
-	"io"
-)
-
-type sectionWriter struct {
-	count    int64
-	offset   int64
-	position int64
-	writerAt io.WriterAt
-}
-
-func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
-	return &sectionWriter{
-		count:    count,
-		offset:   off,
-		writerAt: c,
-	}
-}
-
-func (c *sectionWriter) Write(p []byte) (int, error) {
-	remaining := c.count - c.position
-
-	if remaining <= 0 {
-		return 0, errors.New("end of section reached")
-	}
-
-	slice := p
-
-	if int64(len(slice)) > remaining {
-		slice = slice[:remaining]
-	}
-
-	n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
-	c.position += int64(n)
-	if err != nil {
-		return n, err
-	}
-
-	if len(p) > n {
-		return n, errors.New("not enough space for all bytes")
-	}
-
-	return n, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
new file mode 100644
index 000000000..724d92b91
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
@@ -0,0 +1,287 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package service
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+)
+
+// ClientOptions contains the optional parameters when creating a Client.
+type ClientOptions struct {
+	azcore.ClientOptions
+}
+
+// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
+type Client base.Client[generated.ServiceClient]
+
+// NewClient creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// Use this when the storage account is accessed anonymously or via a shared access signature (SAS) token.
+// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/?
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/
+// - cred - a SharedKeyCredential created with the matching storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+// GetUserDelegationCredential obtains a UserDelegationCredential using the client's service URL.
+// OAuth is required for this call, along with a role that can delegate access to the storage account.
+func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) {
+	url, err := blob.ParseURL(s.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	getUserDelegationKeyOptions := o.format()
+	udk, err := s.generated().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil
+}
+
+func (s *Client) generated() *generated.ServiceClient {
+	return base.InnerClient((*base.Client[generated.ServiceClient])(s))
+}
+
+func (s *Client) sharedKey() *SharedKeyCredential {
+	return base.SharedKey((*base.Client[generated.ServiceClient])(s))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (s *Client) URL() string {
+	return s.generated().Endpoint()
+}
+
+// NewContainerClient creates a new container.Client object by concatenating containerName to the end of
+// this Client's URL. The new container.Client uses the same request policy pipeline as this Client.
+func (s *Client) NewContainerClient(containerName string) *container.Client {
+	containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
+	return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey()))
+}
+
+// CreateContainer is a lifecycle method that creates a new container under the specified account.
+// If a container with the same name already exists, a ResourceExistsError will be raised.
+// Use NewContainerClient to obtain a client for interacting with the newly created container.
+func (s *Client) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (CreateContainerResponse, error) {
+	containerClient := s.NewContainerClient(containerName)
+	containerCreateResp, err := containerClient.Create(ctx, options)
+	return containerCreateResp, err
+}
+
+// DeleteContainer is a lifecycle method that marks the specified container for deletion.
+// The container and any blobs contained within it are later deleted during garbage collection.
+// If the container is not found, a ResourceNotFoundError will be raised.
+func (s *Client) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (DeleteContainerResponse, error) {
+	containerClient := s.NewContainerClient(containerName)
+	containerDeleteResp, err := containerClient.Delete(ctx, options)
+	return containerDeleteResp, err
+}
+
+// RestoreContainer restores a soft-deleted container.
+// The operation succeeds only when used within the number of days set in the delete retention policy.
+func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName string, deletedContainerVersion string, options *RestoreContainerOptions) (RestoreContainerResponse, error) {
+	containerClient := s.NewContainerClient(deletedContainerName)
+	containerRestoreResp, err := containerClient.Restore(ctx, deletedContainerVersion, options)
+	return containerRestoreResp, err
+}
+
+// GetAccountInfo provides account-level information.
+func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
+	getAccountInfoOptions := o.format()
+	resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions)
+	return resp, err
+}
+
+// NewListContainersPager returns a pager over the containers under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
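+//
+// A rough draining sketch (assumes svc is a *service.Client; field names follow the
+// generated ListContainersResponse):
+//
+//	pager := svc.NewListContainersPager(&service.ListContainersOptions{
+//		Include: service.ListContainersInclude{Metadata: true},
+//	})
+//	for pager.More() {
+//		page, err := pager.NextPage(context.TODO())
+//		if err != nil {
+//			break // surface the error instead in real code
+//		}
+//		for _, item := range page.ContainerItems {
+//			_ = item.Name
+//		}
+//	}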
+func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] {
+	listOptions := generated.ServiceClientListContainersSegmentOptions{}
+	if o != nil {
+		if o.Include.Deleted {
+			listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeDeleted)
+		}
+		if o.Include.Metadata {
+			listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata)
+		}
+		listOptions.Marker = o.Marker
+		listOptions.Maxresults = o.MaxResults
+		listOptions.Prefix = o.Prefix
+	}
+	return runtime.NewPager(runtime.PagingHandler[ListContainersResponse]{
+		More: func(page ListContainersResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *ListContainersResponse) (ListContainersResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions)
+			} else {
+				listOptions.Marker = page.Marker
+				req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions)
+			}
+			if err != nil {
+				return ListContainersResponse{}, err
+			}
+			resp, err := s.generated().Pipeline().Do(req)
+			if err != nil {
+				return ListContainersResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return ListContainersResponse{}, runtime.NewResponseError(resp)
+			}
+			return s.generated().ListContainersSegmentHandleResponse(resp)
+		},
+	})
+}
+
+// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules.
+func (s *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) {
+	getPropertiesOptions := o.format()
+	resp, err := s.generated().GetProperties(ctx, getPropertiesOptions)
+	return resp, err
+}
+
+// SetProperties sets the properties of a storage account's Blob service, including Azure Storage Analytics.
+// If an element (e.g. Logging) is left as nil, the existing settings on the service for that functionality are preserved.
+func (s *Client) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) {
+	properties, setPropertiesOptions := o.format()
+	resp, err := s.generated().SetProperties(ctx, properties, setPropertiesOptions)
+	return resp, err
+}
+
+// GetStatistics retrieves statistics related to replication for the Blob service.
+// It is only available when read-access geo-redundant replication is enabled for the storage account.
+// With geo-redundant replication, Azure Storage maintains your data durable
+// in two locations. In both locations, Azure Storage constantly maintains
+// multiple healthy replicas of your data. The location where you read,
+// create, update, or delete data is the primary storage account location.
+// The primary location exists in the region you choose at the time you
+// create an account via the Azure portal, for
+// example, North Central US. The location to which your data is replicated
+// is the secondary location. The secondary location is automatically
+// determined based on the location of the primary; it is in a second data
+// center that resides in the same region as the primary location. Read-only
+// access is available from the secondary location, if read-access geo-redundant
+// replication is enabled for your storage account.
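+//
+// A rough call sketch (assumes svc is a *service.Client with RA-GRS enabled; the
+// GeoReplication field follows the generated response shape):
+//
+//	stats, err := svc.GetStatistics(context.TODO(), nil)
+//	if err == nil && stats.GeoReplication != nil {
+//		_ = stats.GeoReplication.Status // e.g. "live"
+//	}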
+func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) {
+	getStatisticsOptions := o.format()
+	resp, err := s.generated().GetStatistics(ctx, getStatisticsOptions)
+
+	return resp, err
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the account the client points to.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, services sas.AccountServices, start time.Time, expiry time.Time) (string, error) {
+	if s.sharedKey() == nil {
+		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
+	}
+
+	qps, err := sas.AccountSignatureValues{
+		Version:       sas.Version,
+		Protocol:      sas.ProtocolHTTPS,
+		Permissions:   permissions.String(),
+		Services:      services.String(),
+		ResourceTypes: resources.String(),
+		StartTime:     start.UTC(),
+		ExpiryTime:    expiry.UTC(),
+	}.SignWithSharedKey(s.sharedKey())
+	if err != nil {
+		return "", err
+	}
+
+	endpoint := s.URL()
+	if !strings.HasSuffix(endpoint, "/") {
+		// add a trailing slash to be consistent with the portal
+		endpoint += "/"
+	}
+	endpoint += "?" + qps.Encode()
+
+	return endpoint, nil
+}
+
+// FilterBlobs finds all blobs in the storage account whose tags match a given search expression.
+// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
+// e.g. "dog='germanshepherd' and penguin='emperorpenguin'"
+// To specify a container, e.g. "@container='containerName' and Name = 'C'"
+func (s *Client) FilterBlobs(ctx context.Context, o *FilterBlobsOptions) (FilterBlobsResponse, error) {
+	serviceFilterBlobsOptions := o.format()
+	resp, err := s.generated().FilterBlobs(ctx, serviceFilterBlobsOptions)
+	return resp, err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go
new file mode 100644
index 000000000..20665fc2b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go
@@ -0,0 +1,92 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package service
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+const (
+	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
+	ContainerNameRoot = "$root"
+
+	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
+	ContainerNameLogs = "$logs"
+)
+
+// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS
+type SKUName = generated.SKUName
+
+const (
+	SKUNameStandardLRS   SKUName = generated.SKUNameStandardLRS
+	SKUNameStandardGRS   SKUName = generated.SKUNameStandardGRS
+	SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS
+	SKUNameStandardZRS   SKUName = generated.SKUNameStandardZRS
+	SKUNamePremiumLRS    SKUName = generated.SKUNamePremiumLRS
+)
+
+// PossibleSKUNameValues returns the possible values for the SKUName const type.
+func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// ListContainersIncludeType defines values for ListContainersIncludeType +type ListContainersIncludeType = generated.ListContainersIncludeType + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = generated.ListContainersIncludeTypeMetadata + ListContainersIncludeTypeDeleted ListContainersIncludeType = generated.ListContainersIncludeTypeDeleted + ListContainersIncludeTypeSystem ListContainersIncludeType = generated.ListContainersIncludeTypeSystem +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return generated.PossibleListContainersIncludeTypeValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus = generated.BlobGeoReplicationStatus + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusLive + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusBootstrap + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusUnavailable +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return generated.PossibleBlobGeoReplicationStatusValues() +} + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go new file mode 100644 index 000000000..b0354cd90 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -0,0 +1,220 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// KeyInfo contains KeyInfo struct. +type KeyInfo = generated.KeyInfo + +// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method +type GetUserDelegationCredentialOptions struct { + // placeholder for future options +} + +func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGetUserDelegationKeyOptions { + return nil +} + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// CpkInfo contains a group of parameters for the BlobClient.Download method. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CpkScopeInfo = generated.CpkScopeInfo + +// CreateContainerOptions contains the optional parameters for the container.Client.Create method. +type CreateContainerOptions = container.CreateOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = container.DeleteOptions + +// RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. +type RestoreContainerOptions = container.RestoreOptions + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CorsRule = generated.CorsRule + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy = generated.RetentionPolicy + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics = generated.Metrics + +// Logging - Azure Analytics Logging settings. +type Logging = generated.Logging + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite = generated.StaticWebsite + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// StorageServiceStats - Stats for the storage service. 
+type StorageServiceStats = generated.StorageServiceStats
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetAccountInfoOptions provides a set of options for Client.GetAccountInfo
+type GetAccountInfoOptions struct {
+	// placeholder for future options
+}
+
+func (o *GetAccountInfoOptions) format() *generated.ServiceClientGetAccountInfoOptions {
+	return nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method.
+type GetPropertiesOptions struct {
+	// placeholder for future options
+}
+
+func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions {
+	return nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ListContainersOptions provides a set of configurations for the ListContainers operation
+type ListContainersOptions struct {
+	Include ListContainersInclude
+
+	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing operation did not return all containers
+	// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in
+	// a subsequent call to request the next page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary,
+	// then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible
+	// that the service will return fewer results than specified by max results, or than the default of 5000.
+	MaxResults *int32
+
+	// Filters the results to return only containers whose name begins with the specified prefix.
+	Prefix *string
+}
+
+// ListContainersInclude indicates what additional information the service should return with each container.
+type ListContainersInclude struct {
+	// Tells the service whether to return metadata for each container.
+	Metadata bool
+
+	// Tells the service whether to return soft-deleted containers.
+	Deleted bool
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SetPropertiesOptions provides a set of options for Client.SetProperties
+type SetPropertiesOptions struct {
+	// The set of CORS rules.
+	Cors []*CorsRule
+
+	// The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible
+	// values include version 2008-10-27 and all more recent versions.
+	DefaultServiceVersion *string
+
+	// the retention policy which determines how long the associated data should persist
+	DeleteRetentionPolicy *RetentionPolicy
+
+	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
+	HourMetrics *Metrics
+
+	// Azure Analytics Logging settings.
+	Logging *Logging
+
+	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
+	MinuteMetrics *Metrics
+
+	// The properties that enable an account to host a static website
+	StaticWebsite *StaticWebsite
+}
+
+func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) {
+	if o == nil {
+		return generated.StorageServiceProperties{}, nil
+	}
+
+	return generated.StorageServiceProperties{
+		Cors:                  o.Cors,
+		DefaultServiceVersion: o.DefaultServiceVersion,
+		DeleteRetentionPolicy: o.DeleteRetentionPolicy,
+		HourMetrics:           o.HourMetrics,
+		Logging:               o.Logging,
+		MinuteMetrics:         o.MinuteMetrics,
+		StaticWebsite:         o.StaticWebsite,
+	}, nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetStatisticsOptions provides a set of options for Client.GetStatistics
+type GetStatisticsOptions struct {
+	// placeholder for future options
+}
+
+func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions {
+	return nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// FilterBlobsOptions provides a set of options for Client.FindBlobsByTags
+type FilterBlobsOptions struct {
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
	Marker *string
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	MaxResults *int32
+	// Filters the results to return only blobs whose tags match the specified expression.
+	Where *string
+}
+
+func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions {
+	if o == nil {
+		return nil
+	}
+	return &generated.ServiceClientFilterBlobsOptions{
+		Marker:     o.Marker,
+		Maxresults: o.MaxResults,
+		Where:      o.Where,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
new file mode 100644
index 000000000..788514cca
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
@@ -0,0 +1,41 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package service
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// CreateContainerResponse contains the response from method container.Client.Create.
+type CreateContainerResponse = generated.ContainerClientCreateResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = generated.ContainerClientDeleteResponse + +// RestoreContainerResponse contains the response from method container.Client.Restore +type RestoreContainerResponse = generated.ContainerClientRestoreResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. +type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse + +// ListContainersResponse contains the response from method Client.ListContainersSegment. +type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// GetStatisticsResponse contains the response from method Client.GetStatistics. +type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse + +// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json new file mode 100644 index 000000000..b23e56abb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json @@ -0,0 +1,513 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." 
+ } + } + }, + "variables": { + "mgmtApiVersion": "2019-06-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + 
"enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "deleteRetentionPolicy": { + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": 
"[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + "name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), 
variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": 
"string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), 
listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go deleted file mode 100644 index 25490ab59..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go +++ /dev/null @@ -1,154 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// AppendBlobClient represents a client to an Azure Storage append blob; -type AppendBlobClient struct { - BlobClient - client *appendBlobClient -} - -// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options. -func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &AppendBlobClient{ - client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options. -func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - return &AppendBlobClient{ - client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options. 
-func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &AppendBlobClient{ - client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, - }, nil -} - -// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) { - p, err := NewBlobURLParts(ab.URL()) - if err != nil { - return nil, err - } - - p.Snapshot = snapshot - endpoint := p.URL() - pipeline := ab.client.pl - - return &AppendBlobClient{ - client: newAppendBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: ab.sharedKey, - }, - }, nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) { - p, err := NewBlobURLParts(ab.URL()) - if err != nil { - return nil, err - } - - p.VersionID = versionID - endpoint := p.URL() - pipeline := ab.client.pl - - return &AppendBlobClient{ - client: newAppendBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: ab.sharedKey, - }, - }, nil -} - -// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) { - appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() - - resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders, - leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - - return toAppendBlobCreateResponse(resp), handleError(err) -} - -// AppendBlock writes a stream to a new block of data to the end of the existing append blob. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. 
-func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return AppendBlobAppendBlockResponse{}, err - } - - appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() - - resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) - - return toAppendBlobAppendBlockResponse(resp), handleError(err) -} - -// AppendBlockFromURL copies a new block of data from a source URL to the end of the existing append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. -func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) { - appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() - - // Content length must always be 0 when appending from a URL; the service rejects anything else with HTTP 400. - resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, - leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err) -} - -// SealAppendBlob seals an append blob, marking it as read-only. -// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal -func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) { - leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.format() - resp, err := ab.client.Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) - return toAppendBlobSealResponse(resp), handleError(err) -}
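For reference, the append-blob surface deleted above composes like this. This is a sketch, not the SDK's own sample: it assumes blobURL already carries a SAS token, that nil option structs are tolerated by the format() helpers, and it uses azcore's streaming.NopCloser to satisfy the io.ReadSeekCloser parameter.

import (
	"context"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// appendLine creates an empty append blob, then appends one block to it.
func appendLine(ctx context.Context, blobURL string) error {
	ab, err := azblob.NewAppendBlobClientWithNoCredential(blobURL, nil)
	if err != nil {
		return err
	}
	// Create makes a 0-size append blob; AppendBlock then grows it block by block.
	if _, err = ab.Create(ctx, nil); err != nil {
		return err
	}
	body := streaming.NopCloser(strings.NewReader("log line 1\n"))
	_, err = ab.AppendBlock(ctx, body, nil) // body must be positioned at offset 0
	return err
}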
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go deleted file mode 100644 index 9543d14f8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go +++ /dev/null @@ -1,278 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. -type BlobClient struct { - client *blobClient - sharedKey *SharedKeyCredential -} - -// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options. -func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options. -func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - return &BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options. -func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &BlobClient{ - client: newBlobClient(blobURL, conn.Pipeline()), - sharedKey: cred, - }, nil -} - -// NewBlobClientFromConnectionString creates a BlobClient from a connection string. -//nolint -func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) { - containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options) - if err != nil { - return nil, err - } - return containerClient.NewBlobClient(blobName) -} - -// URL returns the URL endpoint used by the BlobClient object. -func (b *BlobClient) URL() string { - return b.client.endpoint -} - -// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) { - p, err := NewBlobURLParts(b.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - pipeline := b.client.pl - return &BlobClient{ - client: newBlobClient(p.URL(), pipeline), - sharedKey: b.sharedKey, - }, nil -} - -// WithVersionID creates a new BlobClient object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) { - p, err := NewBlobURLParts(b.URL()) - if err != nil { - return nil, err - } - p.VersionID = versionID - - pipeline := b.client.pl - return &BlobClient{ - client: newBlobClient(p.URL(), pipeline), - sharedKey: b.sharedKey, - }, nil -} - -// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
-func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) { - o, lease, cpk, accessConditions := options.format() - dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions) - if err != nil { - return BlobDownloadResponse{}, handleError(err) - } - - offset := int64(0) - count := int64(CountToEnd) - - if options != nil && options.Offset != nil { - offset = *options.Offset - } - - if options != nil && options.Count != nil { - count = *options.Count - } - - eTag := "" - if dr.ETag != nil { - eTag = *dr.ETag - } - return BlobDownloadResponse{ - b: b, - blobClientDownloadResponse: dr, - ctx: ctx, - getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag}, - ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), - }, err -} - -// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) { - basics, leaseInfo, accessConditions := o.format() - resp, err := b.client.Delete(ctx, basics, leaseInfo, accessConditions) - - return toBlobDeleteResponse(resp), handleError(err) -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) { - undeleteOptions := o.format() - resp, err := b.client.Undelete(ctx, undeleteOptions) - - return toBlobUndeleteResponse(resp), handleError(err) -} - -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. -func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) { - basics, lease, accessConditions := options.format() - resp, err := b.client.SetTier(ctx, tier, basics, lease, accessConditions) - - return toBlobSetTierResponse(resp), handleError(err) -} - -// GetProperties returns the blob's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. -func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) { - basics, lease, cpk, access := options.format() - resp, err := b.client.GetProperties(ctx, basics, lease, cpk, access) - - return toGetBlobPropertiesResponse(resp), handleError(err) -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
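The property and tier accessors above compose in the obvious way. A small sketch; the generated field and constant names (ContentLength, AccessTierCool) are taken on the assumption that this release's models expose them as elsewhere in the SDK.

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// archiveIfLarge demotes a blob to the Cool tier once it crosses 1 MiB.
func archiveIfLarge(ctx context.Context, b *azblob.BlobClient) error {
	props, err := b.GetProperties(ctx, nil)
	if err != nil {
		return err
	}
	if props.ContentLength != nil && *props.ContentLength > 1<<20 {
		fmt.Printf("demoting %d-byte blob to Cool\n", *props.ContentLength)
		_, err = b.SetTier(ctx, azblob.AccessTierCool, nil)
	}
	return err
}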
-func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) { - basics, lease, access := options.format() - resp, err := b.client.SetHTTPHeaders(ctx, basics, &blobHttpHeaders, lease, access) - - return toBlobSetHTTPHeadersResponse(resp), handleError(err) -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) { - basics := blobClientSetMetadataOptions{ - Metadata: metadata, - } - lease, cpk, cpkScope, access := options.format() - resp, err := b.client.SetMetadata(ctx, &basics, lease, cpk, cpkScope, access) - - return toBlobSetMetadataResponse(resp), handleError(err) -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) { - // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter - // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this - // performance hit. - basics, cpk, cpkScope, access, lease := options.format() - resp, err := b.client.CreateSnapshot(ctx, basics, cpk, cpkScope, access, lease) - - return toBlobCreateSnapshotResponse(resp), handleError(err) -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. -func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) { - basics, srcAccess, destAccess, lease := options.format() - resp, err := b.client.StartCopyFromURL(ctx, copySource, basics, srcAccess, destAccess, lease) - - return toBlobStartCopyFromURLResponse(resp), handleError(err) -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) { - basics, lease := options.format() - resp, err := b.client.AbortCopyFromURL(ctx, copyID, basics, lease) - - return toBlobAbortCopyFromURLResponse(resp), handleError(err) -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. 
-// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) { - blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) - - return toBlobSetTagsResponse(resp), handleError(err) -} - -// GetTags operation returns the tags on a blob, a specific blob version, or a snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) { - blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) - - return toBlobGetTagsResponse(resp), handleError(err) - -} - -// GetSASToken is a convenience method for generating a SAS token for the blob this client points to. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) { - urlParts, _ := NewBlobURLParts(b.URL()) - - t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) - - if err != nil { - t = time.Time{} - } - - if b.sharedKey == nil { - return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential") - } - - return BlobSASSignatureValues{ - ContainerName: urlParts.ContainerName, - BlobName: urlParts.BlobName, - SnapshotTime: t, - Version: SASVersion, - - Permissions: permissions.String(), - - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), - }.NewSASQueryParameters(b.sharedKey) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go deleted file mode 100644 index a9273dfb6..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// BlobLeaseClient represents a lease client for a blob -type BlobLeaseClient struct { - BlobClient - leaseID *string -} - -// NewBlobLeaseClient is the constructor for BlobLeaseClient -func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) { - if leaseID == nil { - generatedUuid, err := uuid.New() - if err != nil { - return nil, err - } - leaseID = to.Ptr(generatedUuid.String()) - } - return &BlobLeaseClient{ - BlobClient: *b, - leaseID: leaseID, - }, nil -} - -// AcquireLease acquires a lease on the blob for write and delete operations. -// The lease Duration must be between 15 and 60 seconds, or infinite (-1). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
-func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) { - blobAcquireLeaseOptions, modifiedAccessConditions := options.format() - blobAcquireLeaseOptions.ProposedLeaseID = blc.leaseID - - resp, err := blc.client.AcquireLease(ctx, &blobAcquireLeaseOptions, modifiedAccessConditions) - return toBlobAcquireLeaseResponse(resp), handleError(err) -} - -// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) -// constant to break a fixed-Duration lease when it expires or an infinite lease immediately. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) { - blobBreakLeaseOptions, modifiedAccessConditions := options.format() - resp, err := blc.client.BreakLease(ctx, blobBreakLeaseOptions, modifiedAccessConditions) - return toBlobBreakLeaseResponse(resp), handleError(err) -} - -// ChangeLease changes the blob's lease ID. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) { - if blc.leaseID == nil { - return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil") - } - proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() - if err != nil { - return BlobChangeLeaseResponse{}, err - } - resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) - - // If lease has been changed successfully, set the leaseID in client - if err == nil { - blc.leaseID = proposedLeaseID - } - - return toBlobChangeLeaseResponse(resp), handleError(err) -} - -// RenewLease renews the blob's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) { - if blc.leaseID == nil { - return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil") - } - renewLeaseBlobOptions, modifiedAccessConditions := options.format() - resp, err := blc.client.RenewLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) - return toBlobRenewLeaseResponse(resp), handleError(err) -} - -// ReleaseLease releases the blob's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
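Taken together, the lease methods above form an acquire/renew/release lifecycle. A hedged sketch follows; the Duration option field and the nil-tolerance of the option structs are assumptions about this release, and the 15-to-60-second window comes from the comment above.

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// withLease acquires a 15-second lease, runs the guarded work, and releases.
func withLease(ctx context.Context, b *azblob.BlobClient, work func() error) error {
	lease, err := b.NewBlobLeaseClient(nil) // nil proposes a fresh UUID as the lease ID
	if err != nil {
		return err
	}
	_, err = lease.AcquireLease(ctx, &azblob.BlobAcquireLeaseOptions{Duration: to.Ptr(int32(15))})
	if err != nil {
		return err
	}
	defer lease.ReleaseLease(ctx, nil) // best-effort release on every path
	if err = work(); err != nil {
		return err
	}
	_, err = lease.RenewLease(ctx, nil) // extend if more time is needed
	return err
}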
-func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseBlobOptions) (BlobReleaseLeaseResponse, error) { - if blc.leaseID == nil { - return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") - } - renewLeaseBlobOptions, modifiedAccessConditions := options.format() - resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) - return toBlobReleaseLeaseResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go deleted file mode 100644 index b080128c8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" -) - -// BlockBlobClient defines a set of operations applicable to block blobs. -type BlockBlobClient struct { - BlobClient - client *blockBlobClient -} - -// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options. -func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - }, - }, nil -} - -// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options. -func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - }, - }, nil -} - -// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options. -func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - sharedKey: cred, - }, - }, nil -} - -// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. 
-func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) { - p, err := NewBlobURLParts(bb.URL()) - if err != nil { - return nil, err - } - - p.Snapshot = snapshot - endpoint := p.URL() - bClient := newBlobClient(endpoint, bb.client.pl) - - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - sharedKey: bb.sharedKey, - }, - }, nil -} - -// WithVersionID creates a new BlockBlobClient object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) { - p, err := NewBlobURLParts(bb.URL()) - if err != nil { - return nil, err - } - - p.VersionID = versionID - endpoint := p.URL() - bClient := newBlobClient(endpoint, bb.client.pl) - - return &BlockBlobClient{ - client: newBlockBlobClient(bClient.endpoint, bClient.pl), - BlobClient: BlobClient{ - client: bClient, - sharedKey: bb.sharedKey, - }, - }, nil -} - -// Upload creates a new block blob or overwrites an existing block blob. -// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not -// supported with Upload; the content of the existing blob is overwritten with the new content. To -// perform a partial update of a block blob, use StageBlock and CommitBlockList. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return BlockBlobUploadResponse{}, err - } - - basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() - - resp, err := bb.client.Upload(ctx, count, body, basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) - - return toBlockBlobUploadResponse(resp), handleError(err) -} - -// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. -func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, - options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return BlockBlobStageBlockResponse{}, err - } - - stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() - resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo) - - return toBlockBlobStageBlockResponse(resp), handleError(err) -} - -// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// If count is CountToEnd (0), then data is read from the specified offset to the end. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
-func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, - contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) { - - stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() - - resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions, - cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) - - return toBlockBlobStageBlockFromURLResponse(resp), handleError(err) -} - -// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. -// In order to be written as part of a blob, a block must have been successfully written -// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob -// by uploading only those blocks that have changed, then committing the new and existing -// blocks together. Any blocks not specified in the block list are permanently deleted. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. -func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) { - // this is a code smell in the generated code - blockIds := make([]*string, len(base64BlockIDs)) - for k, v := range base64BlockIDs { - blockIds[k] = to.Ptr(v) - } - - blockLookupList := BlockLookupList{Latest: blockIds} - commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess := options.format() - - resp, err := bb.client.CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) - - return toBlockBlobCommitBlockListResponse(resp), handleError(err) -} - -// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. -func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) { - o, lac, mac := options.format() - - resp, err := bb.client.GetBlockList(ctx, listType, o, lac, mac) - - return toBlockBlobGetBlockListResponse(resp), handleError(err) -} - -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. -func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) { - copyOptions, smac, mac, lac := options.format() - resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, copyOptions, smac, mac, lac) - - return toBlockBlobCopyFromURLResponse(resp), handleError(err) -}
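Since CommitBlockList only commits blocks that were previously staged, a chunked upload is a loop of StageBlock calls followed by one commit. A sketch under the same assumptions as earlier; keeping the base64 block IDs equal-length is the caller's responsibility.

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// uploadInBlocks stages each chunk as a block, then commits them in order.
func uploadInBlocks(ctx context.Context, bb *azblob.BlockBlobClient, chunks [][]byte) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		// IDs must be base64 strings of identical length within one blob.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", i)))
		body := streaming.NopCloser(bytes.NewReader(chunk))
		if _, err := bb.StageBlock(ctx, id, body, nil); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	// Until this call succeeds, the staged blocks are invisible to readers.
	_, err := bb.CommitBlockList(ctx, ids, nil)
	return err
}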
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go deleted file mode 100644 index 2c23b8f4e..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "errors" - "fmt" - "strings" -) - -var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + - "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + - "AccountKey=;EndpointSuffix=core.windows.net'") - -// convertConnStrToMap converts a connection string (in format key1=value1;key2=value2;key3=value3;) into a map of key-value pairs -func convertConnStrToMap(connStr string) (map[string]string, error) { - ret := make(map[string]string) - connStr = strings.TrimRight(connStr, ";") - - splitString := strings.Split(connStr, ";") - if len(splitString) == 0 { - return ret, errConnectionString - } - for _, stringPart := range splitString { - parts := strings.SplitN(stringPart, "=", 2) - if len(parts) != 2 { - return ret, errConnectionString - } - ret[parts[0]] = parts[1] - } - return ret, nil -} - -// parseConnectionString parses a connection string into a service URL plus a SharedKeyCredential, or into a service URL -// with the SharedAccessSignature already attached. -func parseConnectionString(connectionString string) (string, *SharedKeyCredential, error) { - var serviceURL string - var cred *SharedKeyCredential - - defaultScheme := "https" - defaultSuffix := "core.windows.net" - - connStrMap, err := convertConnStrToMap(connectionString) - if err != nil { - return "", nil, err - } - - accountName, ok := connStrMap["AccountName"] - if !ok { - return "", nil, errConnectionString - } - accountKey, ok := connStrMap["AccountKey"] - if !ok { - sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] - if !ok { - return "", nil, errConnectionString - } - return fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), nil, nil - } - - protocol, ok := connStrMap["DefaultEndpointsProtocol"] - if !ok { - protocol = defaultScheme - } - - suffix, ok := connStrMap["EndpointSuffix"] - if !ok { - suffix = defaultSuffix - } - - blobEndpoint, ok := connStrMap["BlobEndpoint"] - if ok { - cred, err = NewSharedKeyCredential(accountName, accountKey) - return blobEndpoint, cred, err - } - serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) - - cred, err = NewSharedKeyCredential(accountName, accountKey) - if err != nil { - return "", nil, err - } - - return serviceURL, cred, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go deleted file mode 100644 index 12c4a18df..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go +++ /dev/null @@ -1,253 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// ContainerClient represents a URL to an Azure Storage container, allowing you to manipulate its blobs. -type ContainerClient struct { - client *containerClient - sharedKey *SharedKeyCredential -} - -// URL returns the URL endpoint used by the ContainerClient object.
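The parser above is what the *FromConnectionString constructors lean on. For example (the account name and key below are placeholders, not real credentials):

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"

// blobFromConnString resolves to https://myaccount.blob.core.windows.net plus a
// SharedKeyCredential, per parseConnectionString above.
func blobFromConnString(container, blob string) (*azblob.BlobClient, error) {
	connStr := "DefaultEndpointsProtocol=https;AccountName=myaccount;" +
		"AccountKey=bXlmYWtla2V5;EndpointSuffix=core.windows.net"
	return azblob.NewBlobClientFromConnectionString(connStr, container, blob, nil)
}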
-func (c *ContainerClient) URL() string { - return c.client.endpoint -} - -// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options. -func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(containerURL, conOptions) - - return &ContainerClient{ - client: newContainerClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options. -func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(containerURL, conOptions) - - return &ContainerClient{ - client: newContainerClient(conn.Endpoint(), conn.Pipeline()), - }, nil -} - -// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options. -func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(containerURL, conOptions) - - return &ContainerClient{ - client: newContainerClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, nil -} - -// NewContainerClientFromConnectionString creates a ContainerClient object using connection string of an account -func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) { - svcClient, err := NewServiceClientFromConnectionString(connectionString, options) - if err != nil { - return nil, err - } - return svcClient.NewContainerClient(containerName) -} - -// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of -// ContainerClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's -// NewBlobClient method. -func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, nil -} - -// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of -// ContainerClient's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's -// NewAppendBlobClient method. 
-func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &AppendBlobClient{ - BlobClient: BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, - client: newAppendBlobClient(blobURL, c.client.pl), - }, nil -} - -// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of -// ContainerClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's -// NewBlockBlobClient method. -func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &BlockBlobClient{ - BlobClient: BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, - client: newBlockBlobClient(blobURL, c.client.pl), - }, nil -} - -// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of ContainerClient's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerClient. -// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's -// NewPageBlobClient method. -func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) { - blobURL := appendToURLPath(c.URL(), blobName) - - return &PageBlobClient{ - BlobClient: BlobClient{ - client: newBlobClient(blobURL, c.client.pl), - sharedKey: c.sharedKey, - }, - client: newPageBlobClient(blobURL, c.client.pl), - }, nil -} - -// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. -func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) { - basics, cpkInfo := options.format() - resp, err := c.client.Create(ctx, basics, cpkInfo) - - return toContainerCreateResponse(resp), handleError(err) -} - -// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. -func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) { - basics, leaseInfo, accessConditions := o.format() - resp, err := c.client.Delete(ctx, basics, leaseInfo, accessConditions) - - return toContainerDeleteResponse(resp), handleError(err) -} - -// GetProperties returns the container's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. -func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) { - // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetProperties method at all simplifying the API. 
- // The optionals are nil, like they were in track 1.5 - options, leaseAccess := o.format() - resp, err := c.client.GetProperties(ctx, options, leaseAccess) - - return toContainerGetPropertiesResponse(resp), handleError(err) -} - -// SetMetadata sets the container's metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. -func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) { - metadataOptions, lac, mac := o.format() - resp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac) - - return toContainerSetMetadataResponse(resp), handleError(err) -} - -// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. -func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) { - options, ac := o.format() - resp, err := c.client.GetAccessPolicy(ctx, options, ac) - - return toContainerGetAccessPolicyResponse(resp), handleError(err) -} - -// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. -func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) { - accessPolicy, mac, lac := o.format() - resp, err := c.client.SetAccessPolicy(ctx, accessPolicy, mac, lac) - - return toContainerSetAccessPolicyResponse(resp), handleError(err) -} - -// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager { - listOptions := o.format() - pager := c.client.ListBlobFlatSegment(listOptions) - - // override the advancer - pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) { - listOptions.Marker = response.NextMarker - return c.client.listBlobFlatSegmentCreateRequest(ctx, listOptions) - } - - return toContainerListBlobFlatSegmentPager(pager) -}
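A hedged sketch of consuming the flat-listing pager returned above. The NextPage/PageResponse/Err pager surface and the Segment.BlobItems field path reflect this pre-1.0 release, but treat them as assumptions rather than a documented contract.

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// printBlobNames walks every page of the flat listing.
func printBlobNames(ctx context.Context, c *azblob.ContainerClient) error {
	pager := c.ListBlobsFlat(nil)
	for pager.NextPage(ctx) { // the overridden advancer carries the Marker forward
		resp := pager.PageResponse()
		for _, item := range resp.Segment.BlobItems {
			fmt.Println(*item.Name)
		}
	}
	return pager.Err() // surfaces the first error encountered, if any
}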
-// ListBlobsHierarchy returns a pager for blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// The pager's advancer automatically passes the previously-returned Marker to fetch each subsequent segment. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager { - listOptions := o.format() - pager := c.client.ListBlobHierarchySegment(delimiter, listOptions) - - // override the advancer - pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) { - listOptions.Marker = response.NextMarker - return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, listOptions) - } - - return toContainerListBlobHierarchySegmentPager(pager) -} - -// GetSASURL is a convenience method for generating a SAS token for the container this client points to. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) { - if c.sharedKey == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") - } - - urlParts, err := NewBlobURLParts(c.URL()) - if err != nil { - return "", err - } - - // Containers do not have snapshots, nor versions. - urlParts.SAS, err = BlobSASSignatureValues{ - ContainerName: urlParts.ContainerName, - Permissions: permissions.String(), - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), - }.NewSASQueryParameters(c.sharedKey) - - return urlParts.URL(), err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go deleted file mode 100644 index 395a72a89..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// ContainerLeaseClient represents a lease client for a container -type ContainerLeaseClient struct { - ContainerClient - leaseID *string -} - -// NewContainerLeaseClient is the constructor for ContainerLeaseClient -func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) { - if leaseID == nil { - generatedUuid, err := uuid.New() - if err != nil { - return nil, err - } - leaseID = to.Ptr(generatedUuid.String()) - } - return &ContainerLeaseClient{ - ContainerClient: *c, - leaseID: leaseID, - }, nil -} - -// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 and 60 seconds, or infinite (-1). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) { - containerAcquireLeaseOptions, modifiedAccessConditions := options.format() - containerAcquireLeaseOptions.ProposedLeaseID = clc.leaseID - - resp, err := clc.client.AcquireLease(ctx, &containerAcquireLeaseOptions, modifiedAccessConditions) - if err == nil && resp.LeaseID != nil { - clc.leaseID = resp.LeaseID - } - return toContainerAcquireLeaseResponse(resp), handleError(err) -} - -// BreakLease breaks the container's previously-acquired lease (if it exists).
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) { - containerBreakLeaseOptions, modifiedAccessConditions := options.format() - resp, err := clc.client.BreakLease(ctx, containerBreakLeaseOptions, modifiedAccessConditions) - return toContainerBreakLeaseResponse(resp), handleError(err) -} - -// ChangeLease changes the container's lease ID. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) { - if clc.leaseID == nil { - return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil") - } - - proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() - if err != nil { - return ContainerChangeLeaseResponse{}, err - } - - resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) - if err == nil && resp.LeaseID != nil { - clc.leaseID = resp.LeaseID - } - return toContainerChangeLeaseResponse(resp), handleError(err) -} - -// ReleaseLease releases the container's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) { - if clc.leaseID == nil { - return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") - } - containerReleaseLeaseOptions, modifiedAccessConditions := options.format() - resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, containerReleaseLeaseOptions, modifiedAccessConditions) - - return toContainerReleaseLeaseResponse(resp), handleError(err) -} - -// RenewLease renews the container's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) { - if clc.leaseID == nil { - return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil") - } - renewLeaseBlobOptions, modifiedAccessConditions := options.format() - resp, err := clc.client.RenewLease(ctx, *clc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) - if err == nil && resp.LeaseID != nil { - clc.leaseID = resp.LeaseID - } - return toContainerRenewLeaseResponse(resp), handleError(err) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go deleted file mode 100644 index 507993b9e..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go +++ /dev/null @@ -1,261 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "io" - "net/url" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// PageBlobClient represents a client to an Azure Storage page blob. -type PageBlobClient struct { - BlobClient - client *pageBlobClient -} - -// NewPageBlobClient creates a PageBlobClient object using the specified URL, Azure AD credential, and options. -// Example of blobURL: https://.blob.core.windows.net -func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &PageBlobClient{ - client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewPageBlobClientWithNoCredential creates a PageBlobClient object using the specified URL and options. -// Example of blobURL: https://.blob.core.windows.net? -func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) { - conOptions := getConnectionOptions(options) - conn := newConnection(blobURL, conOptions) - - return &PageBlobClient{ - client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - }, - }, nil -} - -// NewPageBlobClientWithSharedKey creates a PageBlobClient object using the specified URL, shared key, and options. -// Example of blobURL: https://.blob.core.windows.net -func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) { - authPolicy := newSharedKeyCredPolicy(cred) - conOptions := getConnectionOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - conn := newConnection(blobURL, conOptions) - - return &PageBlobClient{ - client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), - BlobClient: BlobClient{ - client: newBlobClient(conn.Endpoint(), conn.Pipeline()), - sharedKey: cred, - }, - }, nil -} - -// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) { - p, err := NewBlobURLParts(pb.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - endpoint := p.URL() - pipeline := pb.client.pl - return &PageBlobClient{ - client: newPageBlobClient(endpoint, pipeline), - BlobClient: BlobClient{ - client: newBlobClient(endpoint, pipeline), - sharedKey: pb.sharedKey, - }, - }, nil -} - -// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the version returning a URL to the base blob.
-
-// WithVersionID creates a new PageBlobClient object identical to the source but with the specified version ID.
-// Pass "" to remove the version, returning a URL to the base blob.
-func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) {
-	p, err := NewBlobURLParts(pb.URL())
-	if err != nil {
-		return nil, err
-	}
-
-	p.VersionID = versionID
-	endpoint := p.URL()
-
-	pipeline := pb.client.pl
-	return &PageBlobClient{
-		client: newPageBlobClient(endpoint, pipeline),
-		BlobClient: BlobClient{
-			client:    newBlobClient(endpoint, pipeline),
-			sharedKey: pb.sharedKey,
-		},
-	}, nil
-}
-
-// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) {
-	createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()
-
-	resp, err := pb.client.Create(ctx, 0, size, createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
-
-	return toPageBlobCreateResponse(resp), handleError(err)
-}
-
-// UploadPages writes one or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
-// This method panics if the stream is not at position 0.
-// Note that the http client closes the body stream after the request is sent to the service.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) {
-	count, err := validateSeekableStreamAt0AndGetCount(body)
-
-	if err != nil {
-		return PageBlobUploadPagesResponse{}, err
-	}
-
-	uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
-
-	resp, err := pb.client.UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
-		cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
-
-	return toPageBlobUploadPagesResponse(resp), handleError(err)
-}
-
-// UploadPagesFromURL copies one or more pages from a source URL to the page blob.
-// The sourceOffset specifies the start offset of the source data to copy from.
-// The destOffset specifies the start offset in the page blob at which the data will be written.
-// The count must be a multiple of 512 bytes.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
-func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64,
-	options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) {
-
-	uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := options.format()
-
-	resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0,
-		rangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions,
-		sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
-
-	return toPageBlobUploadPagesFromURLResponse(resp), handleError(err)
-}
-
-// ClearPages frees the specified pages from the page blob.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) { - clearOptions := &pageBlobClientClearPagesOptions{ - Range: pageRange.format(), - } - - leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() - - resp, err := pb.client.ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, - cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - - return toPageBlobClearPagesResponse(resp), handleError(err) -} - -// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager { - getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format() - - pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) - - // Fixing Advancer - pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) { - getPageRangesOptions.Marker = response.NextMarker - req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return nil, handleError(err) - } - queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) - if err != nil { - return nil, handleError(err) - } - req.Raw().URL.RawQuery = queryValues.Encode() - return req, nil - } - - return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager) -} - -// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager { - getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format() - - getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) - - // Fixing Advancer - getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { - getPageRangesDiffOptions.Marker = response.NextMarker - req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return nil, handleError(err) - } - queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) - if err != nil { - return nil, handleError(err) - } - req.Raw().URL.RawQuery = queryValues.Encode() - return req, nil - } - - return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager) -} - -// Resize resizes the page blob to the specified size (which must be a multiple of 512). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) {
-	resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
-
-	resp, err := pb.client.Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
-
-	return toPageBlobResizeResponse(resp), handleError(err)
-}
-
-// UpdateSequenceNumber sets the page blob's sequence number.
-func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) {
-	actionType, updateOptions, lac, mac := options.format()
-	resp, err := pb.client.UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac)
-
-	return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err)
-}
-
-// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
-// The snapshot is copied such that only the differential changes between the previously copied snapshot and the
-// specified snapshot are transferred to the destination.
-// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
-// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
-func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) {
-	copySourceURL, err := url.Parse(copySource)
-	if err != nil {
-		return PageBlobCopyIncrementalResponse{}, err
-	}
-
-	queryParams := copySourceURL.Query()
-	queryParams.Set("snapshot", prevSnapshot)
-	copySourceURL.RawQuery = queryParams.Encode()
-
-	pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format()
-	resp, err := pb.client.CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
-
-	return toPageBlobCopyIncrementalResponse(resp), handleError(err)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go
deleted file mode 100644
index 3f9878439..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go
+++ /dev/null
@@ -1,17 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import "net/http"
-
-// ResponseError wraps an error returned by the service.
-type ResponseError interface {
-	Error() string
-	Unwrap() error
-	RawResponse() *http.Response
-	NonRetriable()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go
deleted file mode 100644
index dda993d1c..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go
+++ /dev/null
@@ -1,35 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
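A hedged sketch of the page-blob write path deleted above (Create, then UploadPages with 512-byte alignment). The PageRange field name on PageBlobUploadPagesOptions is assumed from this SDK generation, and streaming.NopCloser supplies the io.ReadSeekCloser that UploadPages requires, positioned at 0:

```go
package example

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// writeOnePage creates a one-page blob and uploads a single 512-byte page.
func writeOnePage(ctx context.Context, pb *azblob.PageBlobClient) error {
	const pageSize = 512 // offsets and counts must be 512-byte aligned
	if _, err := pb.Create(ctx, pageSize, nil); err != nil { // nil options assumed to mean defaults
		return err
	}
	page := make([]byte, pageSize)
	body := streaming.NopCloser(bytes.NewReader(page)) // ReadSeekCloser at position 0
	_, err := pb.UploadPages(ctx, body, &azblob.PageBlobUploadPagesOptions{
		PageRange: azblob.NewHttpRange(0, pageSize), // field name assumed for this SDK version
	})
	return err
}
```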
-// GetHTTPHeaders returns the user-modifiable properties for this blob.
-func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders {
-	return BlobHTTPHeaders{
-		BlobContentType:        bgpr.ContentType,
-		BlobContentEncoding:    bgpr.ContentEncoding,
-		BlobContentLanguage:    bgpr.ContentLanguage,
-		BlobContentDisposition: bgpr.ContentDisposition,
-		BlobCacheControl:       bgpr.CacheControl,
-		BlobContentMD5:         bgpr.ContentMD5,
-	}
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-// GetHTTPHeaders returns the user-modifiable properties for this blob.
-func (r BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders {
-	return BlobHTTPHeaders{
-		BlobContentType:        r.ContentType,
-		BlobContentEncoding:    r.ContentEncoding,
-		BlobContentLanguage:    r.ContentLanguage,
-		BlobContentDisposition: r.ContentDisposition,
-		BlobCacheControl:       r.CacheControl,
-		BlobContentMD5:         r.ContentMD5,
-	}
-}
-
-///////////////////////////////////////////////////////////////////////////////
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
deleted file mode 100644
index b4104def5..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
+++ /dev/null
@@ -1,243 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"strings"
-	"time"
-)
-
-// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
-type AccountSASSignatureValues struct {
-	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
-	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
-	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
-	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
-	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then calling String()
-	IPRange       IPRange     `param:"sip"`
-	Services      string      `param:"ss"`  // Create by initializing an AccountSASServices and then calling String()
-	ResourceTypes string      `param:"srt"` // Create by initializing an AccountSASResourceTypes and then calling String()
-}
-
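A sketch of how the GetHTTPHeaders helpers above are typically used to round-trip headers. BlobClient.GetProperties and SetHTTPHeaders are assumed from the same legacy package (they are not part of this hunk), as is to.Ptr from the vendored azcore:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// refreshCacheControl reads the blob's user-modifiable headers, tweaks one
// field, and writes the full set back so no other header is lost.
func refreshCacheControl(ctx context.Context, blob *azblob.BlobClient) error {
	resp, err := blob.GetProperties(ctx, nil)
	if err != nil {
		return err
	}
	headers := resp.GetHTTPHeaders()             // copy of the current headers
	headers.BlobCacheControl = to.Ptr("max-age=3600") // fields are *string in this generation
	_, err = blob.SetHTTPHeaders(ctx, headers, nil)   // signature assumed for this SDK version
	return err
}
```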
-// Sign uses an account's shared key credential to sign these signature values to produce
-// the proper SAS query parameters.
-func (v AccountSASSignatureValues) Sign(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
-	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
-	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
-		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
-	}
-	if v.Version == "" {
-		v.Version = SASVersion
-	}
-	perms := &AccountSASPermissions{}
-	if err := perms.Parse(v.Permissions); err != nil {
-		return SASQueryParameters{}, err
-	}
-	v.Permissions = perms.String()
-
-	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
-
-	stringToSign := strings.Join([]string{
-		sharedKeyCredential.AccountName(),
-		v.Permissions,
-		v.Services,
-		v.ResourceTypes,
-		startTime,
-		expiryTime,
-		v.IPRange.String(),
-		string(v.Protocol),
-		v.Version,
-		""}, // That's right, the account SAS requires a terminating extra newline
-		"\n")
-
-	signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
-	if err != nil {
-		return SASQueryParameters{}, err
-	}
-	p := SASQueryParameters{
-		// Common SAS parameters
-		version:     v.Version,
-		protocol:    v.Protocol,
-		startTime:   v.StartTime,
-		expiryTime:  v.ExpiryTime,
-		permissions: v.Permissions,
-		ipRange:     v.IPRange,
-
-		// Account-specific SAS parameters
-		services:      v.Services,
-		resourceTypes: v.ResourceTypes,
-
-		// Calculated SAS signature
-		signature: signature,
-	}
-
-	return p, nil
-}
-
-// AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
-// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues' Permissions field.
-type AccountSASPermissions struct {
-	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
-}
-
-// String produces the SAS permissions string for an Azure Storage account.
-// Call this method to set AccountSASSignatureValues' Permissions field.
-func (p AccountSASPermissions) String() string {
-	var buffer bytes.Buffer
-	if p.Read {
-		buffer.WriteRune('r')
-	}
-	if p.Write {
-		buffer.WriteRune('w')
-	}
-	if p.Delete {
-		buffer.WriteRune('d')
-	}
-	if p.DeletePreviousVersion {
-		buffer.WriteRune('x')
-	}
-	if p.List {
-		buffer.WriteRune('l')
-	}
-	if p.Add {
-		buffer.WriteRune('a')
-	}
-	if p.Create {
-		buffer.WriteRune('c')
-	}
-	if p.Update {
-		buffer.WriteRune('u')
-	}
-	if p.Process {
-		buffer.WriteRune('p')
-	}
-	if p.Tag {
-		buffer.WriteRune('t')
-	}
-	if p.FilterByTags {
-		buffer.WriteRune('f')
-	}
-	return buffer.String()
-}
-
-// Parse initializes the AccountSASPermissions' fields from a string.
-func (p *AccountSASPermissions) Parse(s string) error {
-	*p = AccountSASPermissions{} // Clear out the flags
-	for _, r := range s {
-		switch r {
-		case 'r':
-			p.Read = true
-		case 'w':
-			p.Write = true
-		case 'd':
-			p.Delete = true
-		case 'l':
-			p.List = true
-		case 'a':
-			p.Add = true
-		case 'c':
-			p.Create = true
-		case 'u':
-			p.Update = true
-		case 'p':
-			p.Process = true
-		case 'x':
-			p.DeletePreviousVersion = true // 'x' is what String() writes for DeletePreviousVersion
-		case 't':
-			p.Tag = true
-		case 'f':
-			p.FilterByTags = true
-		default:
-			return fmt.Errorf("invalid permission character: '%v'", r)
-		}
-	}
-	return nil
-}
-
-// AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
-// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. -type AccountSASServices struct { - Blob, Queue, File bool -} - -// String produces the SAS services string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Services field. -func (s AccountSASServices) String() string { - var buffer bytes.Buffer - if s.Blob { - buffer.WriteRune('b') - } - if s.Queue { - buffer.WriteRune('q') - } - if s.File { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASServices' fields from a string. -func (s *AccountSASServices) Parse(str string) error { - *s = AccountSASServices{} // Clear out the flags - for _, r := range str { - switch r { - case 'b': - s.Blob = true - case 'q': - s.Queue = true - case 'f': - s.File = true - default: - return fmt.Errorf("invalid service character: '%v'", r) - } - } - return nil -} - -// AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. -type AccountSASResourceTypes struct { - Service, Container, Object bool -} - -// String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's ResourceTypes field. -func (rt AccountSASResourceTypes) String() string { - var buffer bytes.Buffer - if rt.Service { - buffer.WriteRune('s') - } - if rt.Container { - buffer.WriteRune('c') - } - if rt.Object { - buffer.WriteRune('o') - } - return buffer.String() -} - -// Parse initializes the AccountSASResourceType's fields from a string. -func (rt *AccountSASResourceTypes) Parse(s string) error { - *rt = AccountSASResourceTypes{} // Clear out the flags - for _, r := range s { - switch r { - case 's': - rt.Service = true - case 'c': - rt.Container = true - case 'o': - rt.Object = true - default: - return fmt.Errorf("invalid resource type: '%v'", r) - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go deleted file mode 100644 index 7efbec9b8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go +++ /dev/null @@ -1,427 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "errors" - "net" - "net/url" - "strings" - "time" -) - -// SASProtocol indicates the http/https. -type SASProtocol string - -const ( - // SASProtocolHTTPS can be specified for a SAS protocol - SASProtocolHTTPS SASProtocol = "https" - - // SASProtocolHTTPSandHTTP can be specified for a SAS protocol - //SASProtocolHTTPSandHTTP SASProtocol = "https,http" -) - -// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a -// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
-func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
-	ss := ""
-	if !startTime.IsZero() {
-		ss = formatSASTimeWithDefaultFormat(&startTime)
-	}
-	se := ""
-	if !expiryTime.IsZero() {
-		se = formatSASTimeWithDefaultFormat(&expiryTime)
-	}
-	sh := ""
-	if !snapshotTime.IsZero() {
-		sh = snapshotTime.Format(SnapshotTimeFormat)
-	}
-	return ss, se, sh
-}
-
-// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
-const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
-var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
-
-// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
-func formatSASTimeWithDefaultFormat(t *time.Time) string {
-	return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
-}
-
-// formatSASTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
-func formatSASTime(t *time.Time, format string) string {
-	if format != "" {
-		return t.Format(format)
-	}
-	return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
-}
-
-// parseSASTimeString tries to parse a SAS time string against the supported ISO 8601 formats.
-func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
-	for _, sasTimeFormat := range SASTimeFormats {
-		t, err = time.Parse(sasTimeFormat, val)
-		if err == nil {
-			timeFormat = sasTimeFormat
-			break
-		}
-	}
-
-	if err != nil {
-		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
-	}
-
-	return
-}
-
-// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-
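Putting the account-SAS pieces above together: a minimal signing sketch that only uses types shown in this hunk (Sign fills in the default Version; Encode, defined further below, renders the final query string):

```go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// accountSASQuery signs a four-hour, read/list account SAS scoped to blob
// containers and objects, and returns it as a URL query string.
func accountSASQuery(cred *azblob.SharedKeyCredential) (string, error) {
	sas, err := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		StartTime:     time.Now().UTC(),
		ExpiryTime:    time.Now().UTC().Add(4 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}.Sign(cred)
	if err != nil {
		return "", err
	}
	return sas.Encode(), nil
}
```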
-// SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
-// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
-// to a query parameter map by calling addToValues().
-// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
-// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
-type SASQueryParameters struct {
-	// All members are immutable or values so copies of this struct are goroutine-safe.
-	version                    string      `param:"sv"`
-	services                   string      `param:"ss"`
-	resourceTypes              string      `param:"srt"`
-	protocol                   SASProtocol `param:"spr"`
-	startTime                  time.Time   `param:"st"`
-	expiryTime                 time.Time   `param:"se"`
-	snapshotTime               time.Time   `param:"snapshot"`
-	ipRange                    IPRange     `param:"sip"`
-	identifier                 string      `param:"si"`
-	resource                   string      `param:"sr"`
-	permissions                string      `param:"sp"`
-	signature                  string      `param:"sig"`
-	cacheControl               string      `param:"rscc"`
-	contentDisposition         string      `param:"rscd"`
-	contentEncoding            string      `param:"rsce"`
-	contentLanguage            string      `param:"rscl"`
-	contentType                string      `param:"rsct"`
-	signedOid                  string      `param:"skoid"`
-	signedTid                  string      `param:"sktid"`
-	signedStart                time.Time   `param:"skt"`
-	signedService              string      `param:"sks"`
-	signedExpiry               time.Time   `param:"ske"`
-	signedVersion              string      `param:"skv"`
-	signedDirectoryDepth       string      `param:"sdd"`
-	preauthorizedAgentObjectId string      `param:"saoid"`
-	agentObjectId              string      `param:"suoid"`
-	correlationId              string      `param:"scid"`
-	// private members used for startTime and expiryTime formatting.
-	stTimeFormat string
-	seTimeFormat string
-}
-
-// PreauthorizedAgentObjectId returns preauthorizedAgentObjectId
-func (p *SASQueryParameters) PreauthorizedAgentObjectId() string {
-	return p.preauthorizedAgentObjectId
-}
-
-// AgentObjectId returns agentObjectId
-func (p *SASQueryParameters) AgentObjectId() string {
-	return p.agentObjectId
-}
-
-// SignedCorrelationId returns correlationId
-func (p *SASQueryParameters) SignedCorrelationId() string {
-	return p.correlationId
-}
-
-// SignedTid returns signedTid
-func (p *SASQueryParameters) SignedTid() string {
-	return p.signedTid
-}
-
-// SignedStart returns signedStart
-func (p *SASQueryParameters) SignedStart() time.Time {
-	return p.signedStart
-}
-
-// SignedExpiry returns signedExpiry
-func (p *SASQueryParameters) SignedExpiry() time.Time {
-	return p.signedExpiry
-}
-
-// SignedService returns signedService
-func (p *SASQueryParameters) SignedService() string {
-	return p.signedService
-}
-
-// SignedVersion returns signedVersion
-func (p *SASQueryParameters) SignedVersion() string {
-	return p.signedVersion
-}
-
-// SnapshotTime returns snapshotTime
-func (p *SASQueryParameters) SnapshotTime() time.Time {
-	return p.snapshotTime
-}
-
-// Version returns version
-func (p *SASQueryParameters) Version() string {
-	return p.version
-}
-
-// Services returns services
-func (p *SASQueryParameters) Services() string {
-	return p.services
-}
-
-// ResourceTypes returns resourceTypes
-func (p *SASQueryParameters) ResourceTypes() string {
-	return p.resourceTypes
-}
-
-// Protocol returns protocol
-func (p *SASQueryParameters) Protocol() SASProtocol {
-	return p.protocol
-}
-
-// StartTime returns startTime
-func (p *SASQueryParameters) StartTime() time.Time {
-	return p.startTime
-}
-
-// ExpiryTime returns expiryTime
-func (p *SASQueryParameters) ExpiryTime() time.Time {
-	return p.expiryTime
-}
-
-// IPRange returns ipRange
-func (p *SASQueryParameters) IPRange() IPRange {
-	return p.ipRange
-}
-
-// Identifier returns identifier
-func (p *SASQueryParameters) Identifier() string {
-	return p.identifier
-}
-
-// Resource returns resource
-func (p *SASQueryParameters) Resource() string {
-	return p.resource
-}
-
-// Permissions returns permissions
-func (p *SASQueryParameters) Permissions() string {
-	return p.permissions
-}
-
-// Signature returns signature
-func (p *SASQueryParameters) Signature() string {
-	return p.signature
-}
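The accessors above (and continuing below) are the only public view of a SASQueryParameters, for instance after Sign has produced one:

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// describeSAS prints a few of the signed components via the read-only getters.
func describeSAS(p azblob.SASQueryParameters) {
	fmt.Printf("version=%s permissions=%s expires=%s\n",
		p.Version(), p.Permissions(), p.ExpiryTime().Format("2006-01-02T15:04:05Z"))
}
```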
-
-// CacheControl returns cacheControl
-func (p *SASQueryParameters) CacheControl() string {
-	return p.cacheControl
-}
-
-// ContentDisposition returns contentDisposition
-func (p *SASQueryParameters) ContentDisposition() string {
-	return p.contentDisposition
-}
-
-// ContentEncoding returns contentEncoding
-func (p *SASQueryParameters) ContentEncoding() string {
-	return p.contentEncoding
-}
-
-// ContentLanguage returns contentLanguage
-func (p *SASQueryParameters) ContentLanguage() string {
-	return p.contentLanguage
-}
-
-// ContentType returns contentType
-func (p *SASQueryParameters) ContentType() string {
-	return p.contentType
-}
-
-// SignedDirectoryDepth returns signedDirectoryDepth
-func (p *SASQueryParameters) SignedDirectoryDepth() string {
-	return p.signedDirectoryDepth
-}
-
-// IPRange represents a SAS IP range's start IP and (optionally) end IP.
-type IPRange struct {
-	Start net.IP // Not specified if length = 0
-	End   net.IP // Not specified if length = 0
-}
-
-// String returns a string representation of an IPRange.
-func (ipr *IPRange) String() string {
-	if len(ipr.Start) == 0 {
-		return ""
-	}
-	start := ipr.Start.String()
-	if len(ipr.End) == 0 {
-		return start
-	}
-	return start + "-" + ipr.End.String()
-}
-
-// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
-// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
-// all SAS-related query parameters are removed from the passed-in map. If
-// deleteSASParametersFromValues is false, the passed-in map is unaltered.
-func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
-	p := SASQueryParameters{}
-	for k, v := range values {
-		val := v[0]
-		isSASKey := true
-		switch strings.ToLower(k) {
-		case "sv":
-			p.version = val
-		case "ss":
-			p.services = val
-		case "srt":
-			p.resourceTypes = val
-		case "spr":
-			p.protocol = SASProtocol(val)
-		case "snapshot":
-			p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
-		case "st":
-			p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
-		case "se":
-			p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
-		case "sip":
-			dashIndex := strings.Index(val, "-")
-			if dashIndex == -1 {
-				p.ipRange.Start = net.ParseIP(val)
-			} else {
-				p.ipRange.Start = net.ParseIP(val[:dashIndex])
-				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
-			}
-		case "si":
-			p.identifier = val
-		case "sr":
-			p.resource = val
-		case "sp":
-			p.permissions = val
-		case "sig":
-			p.signature = val
-		case "rscc":
-			p.cacheControl = val
-		case "rscd":
-			p.contentDisposition = val
-		case "rsce":
-			p.contentEncoding = val
-		case "rscl":
-			p.contentLanguage = val
-		case "rsct":
-			p.contentType = val
-		case "skoid":
-			p.signedOid = val
-		case "sktid":
-			p.signedTid = val
-		case "skt":
-			p.signedStart, _ = time.Parse(SASTimeFormat, val)
-		case "ske":
-			p.signedExpiry, _ = time.Parse(SASTimeFormat, val)
-		case "sks":
-			p.signedService = val
-		case "skv":
-			p.signedVersion = val
-		case "sdd":
-			p.signedDirectoryDepth = val
-		case "saoid":
-			p.preauthorizedAgentObjectId = val
-		case "suoid":
-			p.agentObjectId = val
-		case "scid":
-			p.correlationId = val
-		default:
-			isSASKey = false // We didn't recognize the query parameter
-		}
-		if isSASKey && deleteSASParametersFromValues {
-			delete(values, k)
-		}
-	}
-	return p
-}
-
-// addToValues adds the SAS components to the specified query parameters map.
-func (p *SASQueryParameters) addToValues(v url.Values) url.Values { - if p.version != "" { - v.Add("sv", p.version) - } - if p.services != "" { - v.Add("ss", p.services) - } - if p.resourceTypes != "" { - v.Add("srt", p.resourceTypes) - } - if p.protocol != "" { - v.Add("spr", string(p.protocol)) - } - if !p.startTime.IsZero() { - v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) - } - if !p.expiryTime.IsZero() { - v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) - } - if len(p.ipRange.Start) > 0 { - v.Add("sip", p.ipRange.String()) - } - if p.identifier != "" { - v.Add("si", p.identifier) - } - if p.resource != "" { - v.Add("sr", p.resource) - } - if p.permissions != "" { - v.Add("sp", p.permissions) - } - if p.signedOid != "" { - v.Add("skoid", p.signedOid) - v.Add("sktid", p.signedTid) - v.Add("skt", p.signedStart.Format(SASTimeFormat)) - v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) - v.Add("sks", p.signedService) - v.Add("skv", p.signedVersion) - } - if p.signature != "" { - v.Add("sig", p.signature) - } - if p.cacheControl != "" { - v.Add("rscc", p.cacheControl) - } - if p.contentDisposition != "" { - v.Add("rscd", p.contentDisposition) - } - if p.contentEncoding != "" { - v.Add("rsce", p.contentEncoding) - } - if p.contentLanguage != "" { - v.Add("rscl", p.contentLanguage) - } - if p.contentType != "" { - v.Add("rsct", p.contentType) - } - if p.signedDirectoryDepth != "" { - v.Add("sdd", p.signedDirectoryDepth) - } - if p.preauthorizedAgentObjectId != "" { - v.Add("saoid", p.preauthorizedAgentObjectId) - } - if p.agentObjectId != "" { - v.Add("suoid", p.agentObjectId) - } - if p.correlationId != "" { - v.Add("scid", p.correlationId) - } - return v -} - -// Encode encodes the SAS query parameters into URL encoded form sorted by key. -func (p *SASQueryParameters) Encode() string { - v := url.Values{} - p.addToValues(v) - return v.Encode() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go deleted file mode 100644 index e75dd10b3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go +++ /dev/null @@ -1,266 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "errors" - "net/url" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -//nolint -const ( - // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. - ContainerNameRoot = "$root" - - // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. - ContainerNameLogs = "$logs" -) - -// ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. -type ServiceClient struct { - client *serviceClient - sharedKey *SharedKeyCredential -} - -// URL returns the URL endpoint used by the ServiceClient object. -func (s ServiceClient) URL() string { - return s.client.endpoint -} - -// NewServiceClient creates a ServiceClient object using the specified URL, Azure AD credential, and options. 
-// Example of serviceURL: https://<accountname>.blob.core.windows.net
-func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) {
-	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
-	conOptions := getConnectionOptions(options)
-	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
-	conn := newConnection(serviceURL, conOptions)
-
-	return &ServiceClient{
-		client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
-	}, nil
-}
-
-// NewServiceClientWithNoCredential creates a ServiceClient object using the specified URL and options.
-// Example of serviceURL: https://<accountname>.blob.core.windows.net?<SAS token>
-func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) {
-	conOptions := getConnectionOptions(options)
-	conn := newConnection(serviceURL, conOptions)
-
-	return &ServiceClient{
-		client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
-	}, nil
-}
-
-// NewServiceClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options.
-// Example of serviceURL: https://<accountname>.blob.core.windows.net
-func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) {
-	authPolicy := newSharedKeyCredPolicy(cred)
-	conOptions := getConnectionOptions(options)
-	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
-	conn := newConnection(serviceURL, conOptions)
-
-	return &ServiceClient{
-		client:    newServiceClient(conn.Endpoint(), conn.Pipeline()),
-		sharedKey: cred,
-	}, nil
-}
-
-// NewServiceClientFromConnectionString creates a service client from the given connection string.
-//nolint
-func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) {
-	endpoint, credential, err := parseConnectionString(connectionString)
-	if err != nil {
-		return nil, err
-	}
-	return NewServiceClientWithSharedKey(endpoint, credential, options)
-}
-
-// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of
-// ServiceClient's URL. The new ContainerClient uses the same request policy pipeline as the ServiceClient.
-func (s *ServiceClient) NewContainerClient(containerName string) (*ContainerClient, error) {
-	containerURL := appendToURLPath(s.client.endpoint, containerName)
-	return &ContainerClient{
-		client:    newContainerClient(containerURL, s.client.pl),
-		sharedKey: s.sharedKey,
-	}, nil
-}
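A sketch combining the constructors above with the CreateContainer lifecycle method deleted just below, using a placeholder connection string; nil options are assumed to select defaults:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// ensureContainer builds a ServiceClient from a connection string and creates
// a container with it. Per the doc comments below, a ResourceExistsError is
// surfaced when the container already exists.
func ensureContainer(ctx context.Context, connectionString, name string) error {
	svc, err := azblob.NewServiceClientFromConnectionString(connectionString, nil)
	if err != nil {
		return err
	}
	_, err = svc.CreateContainer(ctx, name, nil)
	return err
}
```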
-
-// CreateContainer is a lifecycle method that creates a new container under the specified account.
-// If a container with the same name already exists, the operation fails with a ResourceExistsError.
-func (s *ServiceClient) CreateContainer(ctx context.Context, containerName string, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
-	containerClient, err := s.NewContainerClient(containerName)
-	if err != nil {
-		return ContainerCreateResponse{}, err
-	}
-	containerCreateResp, err := containerClient.Create(ctx, options)
-	return containerCreateResp, err
-}
-
-// DeleteContainer is a lifecycle method that marks the specified container for deletion.
-// The container and any blobs contained within it are later deleted during garbage collection.
-// If the container is not found, a ResourceNotFoundError will be raised.
-func (s *ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
-	containerClient, err := s.NewContainerClient(containerName)
-	if err != nil {
-		return ContainerDeleteResponse{}, err
-	}
-	containerDeleteResp, err := containerClient.Delete(ctx, options)
-	return containerDeleteResp, err
-}
-
-// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
-func appendToURLPath(u string, name string) string {
-	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
-	// When you call url.Parse() this is what you'll get:
-	//     Scheme: "https"
-	//     Opaque: ""
-	//       User: nil
-	//       Host: "ms.com"
-	//       Path: "/a/b/"	This should start with a / and it might or might not have a trailing slash
-	//    RawPath: ""
-	// ForceQuery: false
-	//   RawQuery: "k1=v1&k2=v2"
-	//   Fragment: "f"
-	uri, _ := url.Parse(u)
-
-	if len(uri.Path) == 0 || uri.Path[len(uri.Path)-1] != '/' {
-		uri.Path += "/" // Append "/" to end before appending name
-	}
-	uri.Path += name
-	return uri.String()
-}
-
-// GetAccountInfo provides account-level information.
-func (s *ServiceClient) GetAccountInfo(ctx context.Context, o *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) {
-	getAccountInfoOptions := o.format()
-	resp, err := s.client.GetAccountInfo(ctx, getAccountInfoOptions)
-	return toServiceGetAccountInfoResponse(resp), handleError(err)
-}
-
-// ListContainers operation returns a pager of the containers under the specified account.
-// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
-func (s *ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager {
-	listOptions := o.format()
-	pager := s.client.ListContainersSegment(listOptions)
-	// TODO: the generated pager exposes .Err(); decide whether to check it here.
-	//// override the generated advancer, which is incorrect
-	//if pager.Err() != nil {
-	//	return pager
-	//}
-
-	pager.advancer = func(ctx context.Context, response serviceClientListContainersSegmentResponse) (*policy.Request, error) {
-		if response.ListContainersSegmentResponse.NextMarker == nil {
-			return nil, handleError(errors.New("unexpected missing NextMarker"))
-		}
-		req, err := s.client.listContainersSegmentCreateRequest(ctx, listOptions)
-		if err != nil {
-			return nil, handleError(err)
-		}
-		queryValues, _ := url.ParseQuery(req.Raw().URL.RawQuery)
-		queryValues.Set("marker", *response.ListContainersSegmentResponse.NextMarker)
-
-		req.Raw().URL.RawQuery = queryValues.Encode()
-		return req, nil
-	}
-
-	return toServiceListContainersSegmentPager(*pager)
-}
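Draining the ListContainers pager deleted above: the NextPage/PageResponse/Err methods follow this SDK generation's pager pattern, and the promoted ContainerItems and Name field names are assumptions drawn from the generated response types.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// printContainers walks every page of containers and prints their names.
func printContainers(ctx context.Context, svc *azblob.ServiceClient) {
	pager := svc.ListContainers(nil) // nil options assumed to enumerate from the beginning
	for pager.NextPage(ctx) {
		for _, item := range pager.PageResponse().ContainerItems { // field name assumed
			fmt.Println(*item.Name)
		}
	}
	if err := pager.Err(); err != nil {
		log.Fatal(err)
	}
}
```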
-
-// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics
-// and CORS (Cross-Origin Resource Sharing) rules.
-func (s *ServiceClient) GetProperties(ctx context.Context, o *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) {
-	getPropertiesOptions := o.format()
-	resp, err := s.client.GetProperties(ctx, getPropertiesOptions)
-
-	return toServiceGetPropertiesResponse(resp), handleError(err)
-}
-
-// SetProperties sets the properties of a storage account's Blob service, including Azure Storage Analytics.
-// If an element (e.g. analytics logging) is left as nil, the existing settings on the service for that functionality are preserved.
-func (s *ServiceClient) SetProperties(ctx context.Context, o *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) {
-	properties, setPropertiesOptions := o.format()
-	resp, err := s.client.SetProperties(ctx, properties, setPropertiesOptions)
-
-	return toServiceSetPropertiesResponse(resp), handleError(err)
-}
-
-// GetStatistics retrieves statistics related to replication for the Blob service.
-// It is only available when read-access geo-redundant replication is enabled for the storage account.
-// With geo-redundant replication, Azure Storage keeps your data durable
-// in two locations. In both locations, Azure Storage constantly maintains
-// multiple healthy replicas of your data. The location where you read,
-// create, update, or delete data is the primary storage account location.
-// The primary location exists in the region you choose at the time you
-// create an account via the Azure classic portal, for
-// example, North Central US. The location to which your data is replicated
-// is the secondary location. The secondary location is automatically
-// determined based on the location of the primary; it is in a second data
-// center that resides in the same region as the primary location. Read-only
-// access is available from the secondary location, if read-access geo-redundant
-// replication is enabled for your storage account.
-func (s *ServiceClient) GetStatistics(ctx context.Context, o *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) {
-	getStatisticsOptions := o.format()
-	resp, err := s.client.GetStatistics(ctx, getStatisticsOptions)
-
-	return toServiceGetStatisticsResponse(resp), handleError(err)
-}
-
-// CanGetAccountSASToken reports whether the ServiceClient holds a SharedKeyCredential and can therefore sign a SAS.
-func (s *ServiceClient) CanGetAccountSASToken() bool {
-	return s.sharedKey != nil
-}
-
-// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account.
-// It can only be used if the credential supplied during creation was a SharedKeyCredential.
-// This validity can be checked with CanGetAccountSASToken().
-func (s *ServiceClient) GetSASURL(resources AccountSASResourceTypes, permissions AccountSASPermissions, start time.Time, expiry time.Time) (string, error) {
-	if s.sharedKey == nil {
-		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
-	}
-
-	qps, err := AccountSASSignatureValues{
-		Version:       SASVersion,
-		Protocol:      SASProtocolHTTPS,
-		Permissions:   permissions.String(),
-		Services:      "b",
-		ResourceTypes: resources.String(),
-		StartTime:     start.UTC(),
-		ExpiryTime:    expiry.UTC(),
-	}.Sign(s.sharedKey)
-	if err != nil {
-		return "", err
-	}
-
-	endpoint := s.URL()
-	if !strings.HasSuffix(endpoint, "/") {
-		endpoint += "/"
-	}
-	endpoint += "?" + qps.Encode()
-
-	return endpoint, nil
-}
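GetSASURL above packages the account-SAS flow into a single call; a small usage sketch guarded by CanGetAccountSASToken:

```go
package example

import (
	"errors"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// readOnlySASURL mints a 48-hour, read/list account SAS URL for the account
// the ServiceClient points at.
func readOnlySASURL(svc *azblob.ServiceClient) (string, error) {
	if !svc.CanGetAccountSASToken() {
		return "", errors.New("service client was not constructed with a SharedKeyCredential")
	}
	return svc.GetSASURL(
		azblob.AccountSASResourceTypes{Container: true, Object: true},
		azblob.AccountSASPermissions{Read: true, List: true},
		time.Now(), time.Now().Add(48*time.Hour),
	)
}
```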
-
-// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
-// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
-// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
-// e.g. "dog='germanshepherd' and penguin='emperorpenguin'"
-// To specify a container, e.g. "@container='containerName' and Name = 'C'"
-func (s *ServiceClient) FindBlobsByTags(ctx context.Context, o *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) {
-	// TODO: Use pager here? Missing support from zz_generated_pagers.go
-	serviceFilterBlobsOptions := o.pointer()
-	resp, err := s.client.FilterBlobs(ctx, serviceFilterBlobsOptions)
-	return toServiceFilterBlobsResponse(resp), err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
deleted file mode 100644
index 08c9c8730..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
+++ /dev/null
@@ -1,236 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
-	"bytes"
-	"encoding/xml"
-	"errors"
-	"fmt"
-	"net/http"
-	"sort"
-	"strings"
-
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-// InternalError is an internal error type that all errors get wrapped in.
-type InternalError struct {
-	cause error
-}
-
-// Error returns the wrapped StorageError's message when the cause is one; otherwise it flags the error as internal.
-func (e *InternalError) Error() string {
-	if (errors.Is(e.cause, StorageError{})) {
-		return e.cause.Error()
-	}
-
-	return fmt.Sprintf("===== INTERNAL ERROR =====\n%s", e.cause.Error())
-}
-
-// Is reports whether err is an *InternalError.
-func (e *InternalError) Is(err error) bool {
-	_, ok := err.(*InternalError)
-
-	return ok
-}
-
-// As attempts to unwrap e into target, falling back to the wrapped cause.
-func (e *InternalError) As(target interface{}) bool {
-	nt, ok := target.(**InternalError)
-
-	if ok {
-		*nt = e
-		return ok
-	}
-
-	//goland:noinspection GoErrorsAs
-	return errors.As(e.cause, target)
-}
-
-// StorageError is the internal struct that replaces the generated StorageError.
-// TL;DR: This implements xml.Unmarshaler, and when the original StorageError is substituted, this unmarshaler kicks in.
-// This handles the description and details. responseErrorToStorageError handles the response, cause, and service code.
-type StorageError struct {
-	response    *http.Response
-	description string
-
-	ErrorCode StorageErrorCode
-	details   map[string]string
-}
-
-func handleError(err error) error {
-	if err == nil {
-		return nil
-	}
-	var respErr *azcore.ResponseError
-	if errors.As(err, &respErr) {
-		return &InternalError{responseErrorToStorageError(respErr)}
-	}
-
-	return &InternalError{err}
-}
-
-// converts an *azcore.ResponseError to a *StorageError, or if that fails, an *InternalError
-func responseErrorToStorageError(responseError *azcore.ResponseError) error {
-	var storageError StorageError
-	body, err := runtime.Payload(responseError.RawResponse)
-	if err != nil {
-		goto Default
-	}
-	if len(body) > 0 {
-		if err := xml.Unmarshal(body, &storageError); err != nil {
-			goto Default
-		}
-	}
-
-	storageError.response = responseError.RawResponse
-
-	storageError.ErrorCode = StorageErrorCode(responseError.RawResponse.Header.Get("x-ms-error-code"))
-
-	if code, ok := storageError.details["Code"]; ok {
-		storageError.ErrorCode = StorageErrorCode(code)
-		delete(storageError.details, "Code")
-	}
-
-	return &storageError
-
-Default:
-	return &InternalError{
-		cause: responseError,
-	}
-}
-
-// StatusCode returns the HTTP status code of the error's response. The caller may examine it but should not modify the response.
-func (e *StorageError) StatusCode() int {
-	return e.response.StatusCode
-}
-
-// Error implements the error interface's Error method to return a string representation of the error.
-func (e StorageError) Error() string {
-	b := &bytes.Buffer{}
-
-	if e.response != nil {
-		_, _ = fmt.Fprintf(b, "===== RESPONSE ERROR (ErrorCode=%s) =====\n", e.ErrorCode)
-		_, _ = fmt.Fprintf(b, "Description=%s, Details: ", e.description)
-		if len(e.details) == 0 {
-			b.WriteString("(none)\n")
-		} else {
-			b.WriteRune('\n')
-			keys := make([]string, 0, len(e.details))
-			// Alphabetize the details
-			for k := range e.details {
-				keys = append(keys, k)
-			}
-			sort.Strings(keys)
-			for _, k := range keys {
-				_, _ = fmt.Fprintf(b, "   %s: %+v\n", k, e.details[k])
-			}
-		}
-		// TODO: include the request/response pair via writeRequestWithResponse once logging is wired up.
-	}
-
-	return b.String()
-}
-
-// Is reports whether err is a StorageError or *StorageError.
-func (e StorageError) Is(err error) bool {
-	_, ok := err.(StorageError)
-	_, ok2 := err.(*StorageError)
-
-	return ok || ok2
-}
-
-// Response returns the raw HTTP response carried by the StorageError.
-func (e StorageError) Response() *http.Response {
-	return e.response
-}
-
-//nolint
-func writeRequestWithResponse(b *bytes.Buffer, request *policy.Request, response *http.Response) {
-	// Write the request into the buffer.
-	_, _ = fmt.Fprint(b, "   "+request.Raw().Method+" "+request.Raw().URL.String()+"\n")
-	writeHeader(b, request.Raw().Header)
-	if response != nil {
-		_, _ = fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
-		_, _ = fmt.Fprint(b, "   RESPONSE Status: "+response.Status+"\n")
-		writeHeader(b, response.Header)
-	}
-}
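Because handleError wraps every failure in *InternalError, whose As method forwards to the wrapped cause, callers can reach the *StorageError directly with errors.As. The StorageErrorCodeContainerNotFound constant name below is an assumption drawn from the generated error-code list:

```go
package example

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// isContainerNotFound unwraps through InternalError to the StorageError and
// inspects the service error code.
func isContainerNotFound(err error) bool {
	var stgErr *azblob.StorageError
	if errors.As(err, &stgErr) {
		return stgErr.ErrorCode == azblob.StorageErrorCodeContainerNotFound // constant name assumed
	}
	return false
}
```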
-
-// writeHeader appends an HTTP request's or response's header into a Buffer.
-//nolint
-func writeHeader(b *bytes.Buffer, header map[string][]string) {
-	if len(header) == 0 {
-		b.WriteString("      (no headers)\n")
-		return
-	}
-	keys := make([]string, 0, len(header))
-	// Alphabetize the headers
-	for k := range header {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	for _, k := range keys {
-		// Redact the value of any Authorization header to prevent security information from persisting in logs
-		value := interface{}("REDACTED")
-		if !strings.EqualFold(k, "Authorization") {
-			value = header[k]
-		}
-		_, _ = fmt.Fprintf(b, "      %s: %+v\n", k, value)
-	}
-}
-
-// Temporary returns true if the error occurred due to a temporary condition (an HTTP status of 500, 502, or 503).
-func (e *StorageError) Temporary() bool {
-	if e.response != nil {
-		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
-//nolint
-func (e *StorageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
-	tokName := ""
-	var t xml.Token
-	for t, err = d.Token(); err == nil; t, err = d.Token() {
-		switch tt := t.(type) {
-		case xml.StartElement:
-			tokName = tt.Name.Local
-		case xml.EndElement:
-			tokName = ""
-		case xml.CharData:
-			switch tokName {
-			case "":
-				continue
-			case "Message":
-				e.description = string(tt)
-			default:
-				if e.details == nil {
-					e.details = map[string]string{}
-				}
-				e.details[tokName] = string(tt)
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
deleted file mode 100644
index 341858f1a..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
+++ /dev/null
@@ -1,107 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-)
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Raw converts PageRange into primitive start, end integers of type int64
-func (pr *PageRange) Raw() (start, end int64) {
-	if pr.Start != nil {
-		start = *pr.Start
-	}
-	if pr.End != nil {
-		end = *pr.End
-	}
-
-	return
-}
-
-// HttpRange defines a range of bytes within an HTTP resource, starting at offset and
-// ending at offset+count. A zero-value HttpRange indicates the entire resource. An HttpRange
-// with a non-zero Offset and a zero Count indicates the range from Offset to the resource's end.
-type HttpRange struct {
-	Offset int64
-	Count  int64
-}
-
-func NewHttpRange(offset, count int64) *HttpRange {
-	return &HttpRange{Offset: offset, Count: count}
-}
-
-func (r *HttpRange) format() *string {
-	if r == nil || (r.Offset == 0 && r.Count == 0) { // Do common case first for performance
-		return nil // No specified range
-	}
-	endOffset := "" // if count == CountToEnd (0)
-	if r.Count > 0 {
-		endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10)
-	}
-	dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset)
-	return &dataRange
-}
-
-func getSourceRange(offset, count *int64) *string {
-	if offset == nil && count == nil {
-		return nil
-	}
-	newOffset := int64(0)
-	newCount := int64(CountToEnd)
-
-	if offset != nil {
-		newOffset = *offset
-	}
-
-	if count != nil {
-		newCount = *count
-	}
-
-	return (&HttpRange{Offset: newOffset, Count: newCount}).format()
-}
-
-func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
-	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
-		return 0, nil
-	}
-
-	err := validateSeekableStreamAt0(body)
-	if err != nil {
-		return 0, err
-	}
-
-	count, err := body.Seek(0, io.SeekEnd)
-	if err != nil {
-		return 0, errors.New("body stream must be seekable")
-	}
-
-	_, err = body.Seek(0, io.SeekStart)
-	if err != nil {
-		return 0, err
-	}
-	return count, nil
-}
-
-// validateSeekableStreamAt0 returns an error if body is not a valid seekable stream positioned at 0.
-func validateSeekableStreamAt0(body io.ReadSeeker) error {
-	if body == nil { // nil bodies are "logically" seekable to 0
-		return nil
-	}
-	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
-		// Help detect programmer error
-		if err != nil {
-			return errors.New("body stream must be seekable")
-		}
-		return errors.New("body stream must be set to position 0")
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
deleted file mode 100644
index 93a2b1a70..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-const (
-	// ETagNone represents an empty entity tag.
-	ETagNone = ""
-
-	// ETagAny matches any entity tag.
-	ETagAny = "*"
-)
-
-// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
-type ContainerAccessConditions struct {
-	ModifiedAccessConditions *ModifiedAccessConditions
-	LeaseAccessConditions    *LeaseAccessConditions
-}
-
-func (ac *ContainerAccessConditions) format() (*ModifiedAccessConditions, *LeaseAccessConditions) {
-	if ac == nil {
-		return nil, nil
-	}
-
-	return ac.ModifiedAccessConditions, ac.LeaseAccessConditions
-}
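A sketch of the HttpRange semantics defined above, applied to the page-blob ClearPages method deleted earlier: a zero-value range means the whole resource, a zero Count with a non-zero Offset means "to the end", and both set produce bytes=Offset-(Offset+Count-1) on the wire. Nil options are assumed to select defaults:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// clearFirstPage frees the first 512-byte page of a page blob; ClearPages
// takes the range by value, and Offset/Count must be 512-byte aligned.
func clearFirstPage(ctx context.Context, pb *azblob.PageBlobClient) error {
	_, err := pb.ClearPages(ctx, azblob.HttpRange{Offset: 0, Count: 512}, nil)
	return err
}
```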
-
-// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
-type BlobAccessConditions struct {
-	LeaseAccessConditions    *LeaseAccessConditions
-	ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (ac *BlobAccessConditions) format() (*LeaseAccessConditions, *ModifiedAccessConditions) {
-	if ac == nil {
-		return nil, nil
-	}
-
-	return ac.LeaseAccessConditions, ac.ModifiedAccessConditions
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
deleted file mode 100644
index 19c3fef66..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
+++ /dev/null
@@ -1,184 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import "time"
-
-// ---------------------------------------------------------------------------------------------------------------------
-
-// AppendBlobCreateOptions provides a set of configurations for the Create Append Blob operation.
-type AppendBlobCreateOptions struct {
-	// Specifies the date time when the blob's immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
-	// Specifies whether a legal hold should be set on the blob.
-	LegalHold *bool
-
-	BlobAccessConditions *BlobAccessConditions
-
-	HTTPHeaders *BlobHTTPHeaders
-
-	CpkInfo *CpkInfo
-
-	CpkScopeInfo *CpkScopeInfo
-	// Optional. Used to set blob tags in various blob operations.
-	TagsMap map[string]string
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
-	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
-	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
-	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
-	Metadata map[string]string
-}
-
-func (o *AppendBlobCreateOptions) format() (*appendBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions,
-	*CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
-
-	if o == nil {
-		return nil, nil, nil, nil, nil, nil
-	}
-
-	options := appendBlobClientCreateOptions{
-		BlobTagsString:           serializeBlobTagsToStrPtr(o.TagsMap),
-		Metadata:                 o.Metadata,
-		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
-		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
-		LegalHold:                o.LegalHold,
-	}
-
-	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
-	return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
-}
-
-// AppendBlobCreateResponse contains the response from method AppendBlobClient.Create.
-type AppendBlobCreateResponse struct { - appendBlobClientCreateResponse -} - -func toAppendBlobCreateResponse(resp appendBlobClientCreateResponse) AppendBlobCreateResponse { - return AppendBlobCreateResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobAppendBlockOptions provides set of configurations for AppendBlock operation -type AppendBlobAppendBlockOptions struct { - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - - AppendPositionAccessConditions *AppendPositionAccessConditions - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - - BlobAccessConditions *BlobAccessConditions -} - -func (o *AppendBlobAppendBlockOptions) format() (*appendBlobClientAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &appendBlobClientAppendBlockOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions -} - -// AppendBlobAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. -type AppendBlobAppendBlockResponse struct { - appendBlobClientAppendBlockResponse -} - -func toAppendBlobAppendBlockResponse(resp appendBlobClientAppendBlockResponse) AppendBlobAppendBlockResponse { - return AppendBlobAppendBlockResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobAppendBlockFromURLOptions provides set of configurations for AppendBlockFromURL operation -type AppendBlobAppendBlockFromURLOptions struct { - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 []byte - - AppendPositionAccessConditions *AppendPositionAccessConditions - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - BlobAccessConditions *BlobAccessConditions - // Optional, you can specify whether a particular range of the blob is read - Offset *int64 - - Count *int64 -} - -func (o *AppendBlobAppendBlockFromURLOptions) format() (*appendBlobClientAppendBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *AppendPositionAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil, nil - } - - options := &appendBlobClientAppendBlockFromURLOptions{ - SourceRange: getSourceRange(o.Offset, o.Count), - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions -} - -// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. -type AppendBlobAppendBlockFromURLResponse struct { - appendBlobClientAppendBlockFromURLResponse -} - -func toAppendBlobAppendBlockFromURLResponse(resp appendBlobClientAppendBlockFromURLResponse) AppendBlobAppendBlockFromURLResponse { - return AppendBlobAppendBlockFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlobSealOptions provides set of configurations for SealAppendBlob operation -type AppendBlobSealOptions struct { - BlobAccessConditions *BlobAccessConditions - AppendPositionAccessConditions *AppendPositionAccessConditions -} - -func (o *AppendBlobSealOptions) format() (leaseAccessConditions *LeaseAccessConditions, - modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return -} - -// AppendBlobSealResponse contains the response from method AppendBlobClient.Seal. -type AppendBlobSealResponse struct { - appendBlobClientSealResponse -} - -func toAppendBlobSealResponse(resp appendBlobClientSealResponse) AppendBlobSealResponse { - return AppendBlobSealResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go deleted file mode 100644 index f4425b18c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go +++ /dev/null @@ -1,478 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "context" - "io" - "net/http" - "time" -) - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobDownloadOptions provides set of configurations for Download blob operation -type BlobDownloadOptions struct { - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - - // Optional, you can specify whether a particular range of the blob is read - Offset *int64 - Count *int64 - - BlobAccessConditions *BlobAccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo -} - -func (o *BlobDownloadOptions) format() (*blobClientDownloadOptions, *LeaseAccessConditions, *CpkInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - offset := int64(0) - count := int64(CountToEnd) - - if o.Offset != nil { - offset = *o.Offset - } - - if o.Count != nil { - count = *o.Count - } - - basics := blobClientDownloadOptions{ - RangeGetContentMD5: o.RangeGetContentMD5, - Range: (&HttpRange{Offset: offset, Count: count}).format(), - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions -} - -// BlobDownloadResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. -type BlobDownloadResponse struct { - blobClientDownloadResponse - ctx context.Context - b *BlobClient - getInfo HTTPGetterInfo - ObjectReplicationRules []ObjectReplicationPolicy -} - -// Body constructs new RetryReader stream for reading data. If a connection fails -// while reading, it will make additional requests to reestablish a connection and -// continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0 -// (the default), returns the original response body and no retries will be performed. -// Pass in nil for options to accept the default options. -func (r *BlobDownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser { - if options == nil { - options = &RetryReaderOptions{} - } - - if options.MaxRetryRequests == 0 { // No additional retries - return r.RawResponse.Body - } - return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options, - func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { - accessConditions := &BlobAccessConditions{ - ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag}, - } - options := BlobDownloadOptions{ - Offset: &getInfo.Offset, - Count: &getInfo.Count, - BlobAccessConditions: accessConditions, - CpkInfo: options.CpkInfo, - //CpkScopeInfo: o.CpkScopeInfo, - } - resp, err := r.b.Download(ctx, &options) - if err != nil { - return nil, err - } - return resp.RawResponse, err - }, - ) -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobDeleteOptions provides set of configurations for Delete blob operation -type BlobDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. 
only: Delete only the blob's snapshots and not the blob itself - DeleteSnapshots *DeleteSnapshotsOptionType - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlobDeleteOptions) format() (*blobClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := blobClientDeleteOptions{ - DeleteSnapshots: o.DeleteSnapshots, - } - - if o.BlobAccessConditions == nil { - return &basics, nil, nil - } - - return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions -} - -// BlobDeleteResponse contains the response from method BlobClient.Delete. -type BlobDeleteResponse struct { - blobClientDeleteResponse -} - -func toBlobDeleteResponse(resp blobClientDeleteResponse) BlobDeleteResponse { - return BlobDeleteResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobUndeleteOptions provides set of configurations for Blob Undelete operation -type BlobUndeleteOptions struct { -} - -func (o *BlobUndeleteOptions) format() *blobClientUndeleteOptions { - return nil -} - -// BlobUndeleteResponse contains the response from method BlobClient.Undelete. -type BlobUndeleteResponse struct { - blobClientUndeleteResponse -} - -func toBlobUndeleteResponse(resp blobClientUndeleteResponse) BlobUndeleteResponse { - return BlobUndeleteResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetTierOptions provides set of configurations for SetTier on blob operation -type BlobSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobSetTierOptions) format() (*blobClientSetTierOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := blobClientSetTierOptions{RehydratePriority: o.RehydratePriority} - return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// BlobSetTierResponse contains the response from method BlobClient.SetTier. 
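Reviewer note: the BlobDownloadResponse.Body method deleted earlier in this hunk only wraps the stream in a RetryReader when MaxRetryRequests is non-zero; with the zero value the raw network body is returned and a mid-stream failure surfaces directly to the reader. A hedged usage sketch against the pre-refactor API (`blobClient` is assumed to exist; shapes follow the deleted code, not the new per-package clients):

```go
import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// readWholeBlob downloads a blob and reads it through a retrying body:
// up to 3 extra ranged requests are issued if the connection drops mid-read.
func readWholeBlob(ctx context.Context, blobClient *azblob.BlobClient) ([]byte, error) {
	resp, err := blobClient.Download(ctx, nil)
	if err != nil {
		return nil, err
	}
	body := resp.Body(&azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	return io.ReadAll(body)
}
```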
-type BlobSetTierResponse struct { - blobClientSetTierResponse -} - -func toBlobSetTierResponse(resp blobClientSetTierResponse) BlobSetTierResponse { - return BlobSetTierResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobGetPropertiesOptions provides set of configurations for GetProperties blob operation -type BlobGetPropertiesOptions struct { - BlobAccessConditions *BlobAccessConditions - CpkInfo *CpkInfo -} - -func (o *BlobGetPropertiesOptions) format() (blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, - leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format() - return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions -} - -// ObjectReplicationRules struct -type ObjectReplicationRules struct { - RuleId string - Status string -} - -// ObjectReplicationPolicy are deserialized attributes -type ObjectReplicationPolicy struct { - PolicyId *string - Rules *[]ObjectReplicationRules -} - -// BlobGetPropertiesResponse reformat the GetPropertiesResponse object for easy consumption -type BlobGetPropertiesResponse struct { - blobClientGetPropertiesResponse - - // deserialized attributes - ObjectReplicationRules []ObjectReplicationPolicy -} - -func toGetBlobPropertiesResponse(resp blobClientGetPropertiesResponse) BlobGetPropertiesResponse { - getResp := BlobGetPropertiesResponse{ - blobClientGetPropertiesResponse: resp, - ObjectReplicationRules: deserializeORSPolicies(resp.ObjectReplicationRules), - } - return getResp -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetHTTPHeadersOptions provides set of configurations for SetHTTPHeaders on blob operation -type BlobSetHTTPHeadersOptions struct { - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobSetHTTPHeadersOptions) format() (*blobClientSetHTTPHeadersOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// BlobSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. -type BlobSetHTTPHeadersResponse struct { - blobClientSetHTTPHeadersResponse -} - -func toBlobSetHTTPHeadersResponse(resp blobClientSetHTTPHeadersResponse) BlobSetHTTPHeadersResponse { - return BlobSetHTTPHeadersResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetMetadataOptions provides set of configurations for Set Metadata on blob operation -type BlobSetMetadataOptions struct { - LeaseAccessConditions *LeaseAccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobSetMetadataOptions) format() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, - cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions -} - -// BlobSetMetadataResponse contains the response from method BlobClient.SetMetadata. 
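Reviewer note: "deserialized attributes" in the ObjectReplicationPolicy comment above refers to replication-status response headers that the SDK groups per policy and rule. A hypothetical sketch of that grouping — the `x-ms-or-<policyID>_<ruleID>: <status>` header layout is an assumption inferred from the struct shapes, not taken from the deleted code:

```go
import "strings"

// parseORSHeaders groups replication-status headers into
// policyID -> ruleID -> status, the same shape as ObjectReplicationPolicy.
func parseORSHeaders(headers map[string]string) map[string]map[string]string {
	policies := map[string]map[string]string{}
	for name, status := range headers {
		if !strings.HasPrefix(name, "x-ms-or-") {
			continue
		}
		policyID, ruleID, ok := strings.Cut(strings.TrimPrefix(name, "x-ms-or-"), "_")
		if !ok {
			continue // e.g. x-ms-or-policy-id, which has no rule component
		}
		if policies[policyID] == nil {
			policies[policyID] = map[string]string{}
		}
		policies[policyID][ruleID] = status
	}
	return policies
}
```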
-type BlobSetMetadataResponse struct { - blobClientSetMetadataResponse -} - -func toBlobSetMetadataResponse(resp blobClientSetMetadataResponse) BlobSetMetadataResponse { - return BlobSetMetadataResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobCreateSnapshotOptions provides set of configurations for CreateSnapshot of blob operation -type BlobCreateSnapshotOptions struct { - Metadata map[string]string - LeaseAccessConditions *LeaseAccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobCreateSnapshotOptions) format() (blobSetMetadataOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, - cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - basics := blobClientCreateSnapshotOptions{ - Metadata: o.Metadata, - } - - return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions -} - -// BlobCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot -type BlobCreateSnapshotResponse struct { - blobClientCreateSnapshotResponse -} - -func toBlobCreateSnapshotResponse(resp blobClientCreateSnapshotResponse) BlobCreateSnapshotResponse { - return BlobCreateSnapshotResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobStartCopyOptions provides set of configurations for StartCopyFromURL blob operation -type BlobStartCopyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Used to set blob tags in various blob operations. - TagsMap map[string]string - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs - // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source - // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. - // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. 
- Tier *AccessTier - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - ModifiedAccessConditions *ModifiedAccessConditions - - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *BlobStartCopyOptions) format() (blobStartCopyFromUrlOptions *blobClientStartCopyFromURLOptions, - sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - basics := blobClientStartCopyFromURLOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), - Metadata: o.Metadata, - RehydratePriority: o.RehydratePriority, - SealBlob: o.SealBlob, - Tier: o.Tier, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - } - - return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions -} - -// BlobStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. -type BlobStartCopyFromURLResponse struct { - blobClientStartCopyFromURLResponse -} - -func toBlobStartCopyFromURLResponse(resp blobClientStartCopyFromURLResponse) BlobStartCopyFromURLResponse { - return BlobStartCopyFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobAbortCopyOptions provides set of configurations for AbortCopyFromURL operation -type BlobAbortCopyOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *BlobAbortCopyOptions) format() (blobAbortCopyFromUrlOptions *blobClientAbortCopyFromURLOptions, - leaseAccessConditions *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - return nil, o.LeaseAccessConditions -} - -// BlobAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL -type BlobAbortCopyFromURLResponse struct { - blobClientAbortCopyFromURLResponse -} - -func toBlobAbortCopyFromURLResponse(resp blobClientAbortCopyFromURLResponse) BlobAbortCopyFromURLResponse { - return BlobAbortCopyFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobSetTagsOptions provides set of configurations for SetTags operation -type BlobSetTagsOptions struct { - // The version id parameter is an opaque DateTime value that, when present, - // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - VersionID *string - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Optional header, Specifies the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 []byte - - TagsMap map[string]string - - ModifiedAccessConditions *ModifiedAccessConditions - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *BlobSetTagsOptions) format() (*blobClientSetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil - } - - options := &blobClientSetTagsOptions{ - Tags: serializeBlobTags(o.TagsMap), - TransactionalContentMD5: o.TransactionalContentMD5, - TransactionalContentCRC64: o.TransactionalContentCRC64, - VersionID: o.VersionID, - } - - return options, o.ModifiedAccessConditions, o.LeaseAccessConditions -} - -// BlobSetTagsResponse contains the response from method BlobClient.SetTags -type BlobSetTagsResponse struct { - blobClientSetTagsResponse -} - -func toBlobSetTagsResponse(resp blobClientSetTagsResponse) BlobSetTagsResponse { - return BlobSetTagsResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobGetTagsOptions provides set of configurations for GetTags operation -type BlobGetTagsOptions struct { - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. - Snapshot *string - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string - - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlobGetTagsOptions) format() (*blobClientGetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil - } - - options := &blobClientGetTagsOptions{ - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - - return options, modifiedAccessConditions, leaseAccessConditions -} - -// BlobGetTagsResponse contains the response from method BlobClient.GetTags -type BlobGetTagsResponse struct { - blobClientGetTagsResponse -} - -func toBlobGetTagsResponse(resp blobClientGetTagsResponse) BlobGetTagsResponse { - return BlobGetTagsResponse{resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go deleted file mode 100644 index 4e574622c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go +++ /dev/null @@ -1,160 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobAcquireLeaseOptions provides set of configurations for AcquireLeaseBlob operation -type BlobAcquireLeaseOptions struct { - // Specifies the Duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease Duration cannot be changed using renew or change. 
- Duration *int32 - - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobAcquireLeaseOptions) format() (blobClientAcquireLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return blobClientAcquireLeaseOptions{}, nil - } - return blobClientAcquireLeaseOptions{ - Duration: o.Duration, - }, o.ModifiedAccessConditions -} - -// BlobAcquireLeaseResponse contains the response from method BlobLeaseClient.AcquireLease. -type BlobAcquireLeaseResponse struct { - blobClientAcquireLeaseResponse -} - -func toBlobAcquireLeaseResponse(resp blobClientAcquireLeaseResponse) BlobAcquireLeaseResponse { - return BlobAcquireLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobBreakLeaseOptions provides set of configurations for BreakLeaseBlob operation -type BlobBreakLeaseOptions struct { - // For a break operation, proposed Duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease - // is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than - // the break period. If this header does not appear with a break operation, a fixed-Duration lease breaks after the remaining - // lease period elapses, and an infinite lease breaks immediately. - BreakPeriod *int32 - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobBreakLeaseOptions) format() (*blobClientBreakLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - if o.BreakPeriod != nil { - period := leasePeriodPointer(*o.BreakPeriod) - return &blobClientBreakLeaseOptions{ - BreakPeriod: period, - }, o.ModifiedAccessConditions - } - - return nil, o.ModifiedAccessConditions -} - -// BlobBreakLeaseResponse contains the response from method BlobLeaseClient.BreakLease. 
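Reviewer note: two sentinels govern the lease options in this hunk. An acquire Duration of -1 means a lease that never expires (otherwise 15–60 seconds), and a BreakPeriod of LeaseBreakNaturally (-1) is translated by the leasePeriodPointer helper (defined in the container-lease file deleted later in this patch) into "omit the header", letting the remaining lease time elapse. A standalone sketch of that translation:

```go
package main

import "fmt"

// LeaseBreakNaturally mirrors the constant in the deleted lease utilities:
// break the lease using service semantics.
const LeaseBreakNaturally = -1

// leasePeriodPointer reproduces the deleted helper: -1 yields nil so the
// x-ms-lease-break-period header is omitted entirely.
func leasePeriodPointer(period int32) *int32 {
	if period == LeaseBreakNaturally {
		return nil
	}
	return &period
}

func main() {
	if p := leasePeriodPointer(LeaseBreakNaturally); p == nil {
		fmt.Println("header omitted: lease breaks after its remaining period")
	}
	if p := leasePeriodPointer(0); p != nil {
		fmt.Printf("break immediately: period=%d\n", *p)
	}
}
```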
-type BlobBreakLeaseResponse struct { - blobClientBreakLeaseResponse -} - -func toBlobBreakLeaseResponse(resp blobClientBreakLeaseResponse) BlobBreakLeaseResponse { - return BlobBreakLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobChangeLeaseOptions provides set of configurations for ChangeLeaseBlob operation -type BlobChangeLeaseOptions struct { - ProposedLeaseID *string - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobChangeLeaseOptions) format() (*string, *blobClientChangeLeaseOptions, *ModifiedAccessConditions, error) { - generatedUuid, err := uuid.New() - if err != nil { - return nil, nil, nil, err - } - leaseID := to.Ptr(generatedUuid.String()) - if o == nil { - return leaseID, nil, nil, nil - } - - if o.ProposedLeaseID == nil { - o.ProposedLeaseID = leaseID - } - - return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, nil -} - -// BlobChangeLeaseResponse contains the response from method BlobLeaseClient.ChangeLease -type BlobChangeLeaseResponse struct { - blobClientChangeLeaseResponse -} - -func toBlobChangeLeaseResponse(resp blobClientChangeLeaseResponse) BlobChangeLeaseResponse { - return BlobChangeLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlobRenewLeaseOptions provides set of configurations for RenewLeaseBlob operation -type BlobRenewLeaseOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *BlobRenewLeaseOptions) format() (*blobClientRenewLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// BlobRenewLeaseResponse contains the response from method BlobClient.RenewLease. -type BlobRenewLeaseResponse struct { - blobClientRenewLeaseResponse -} - -func toBlobRenewLeaseResponse(resp blobClientRenewLeaseResponse) BlobRenewLeaseResponse { - return BlobRenewLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ReleaseLeaseBlobOptions provides set of configurations for ReleaseLeaseBlob operation -type ReleaseLeaseBlobOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ReleaseLeaseBlobOptions) format() (*blobClientReleaseLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// BlobReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. -type BlobReleaseLeaseResponse struct { - blobClientReleaseLeaseResponse -} - -func toBlobReleaseLeaseResponse(resp blobClientReleaseLeaseResponse) BlobReleaseLeaseResponse { - return BlobReleaseLeaseResponse{resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go deleted file mode 100644 index 06d436855..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go +++ /dev/null @@ -1,272 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import "time" - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobUploadOptions provides set of configurations for UploadBlockBlob operation -type BlockBlobUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - TagsMap map[string]string - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - - HTTPHeaders *BlobHTTPHeaders - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobUploadOptions) format() (*blockBlobClientUploadOptions, *BlobHTTPHeaders, *LeaseAccessConditions, - *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - basics := blockBlobClientUploadOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), - Metadata: o.Metadata, - Tier: o.Tier, - TransactionalContentMD5: o.TransactionalContentMD5, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions -} - -// BlockBlobUploadResponse contains the response from method BlockBlobClient.Upload. -type BlockBlobUploadResponse struct { - blockBlobClientUploadResponse -} - -func toBlockBlobUploadResponse(resp blockBlobClientUploadResponse) BlockBlobUploadResponse { - return BlockBlobUploadResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobStageBlockOptions provides set of configurations for StageBlock operation -type BlockBlobStageBlockOptions struct { - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo - - LeaseAccessConditions *LeaseAccessConditions - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -func (o *BlockBlobStageBlockOptions) format() (*blockBlobClientStageBlockOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo) { - if o == nil { - return nil, nil, nil, nil - } - - return &blockBlobClientStageBlockOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo -} - -// BlockBlobStageBlockResponse contains the response from method BlockBlobClient.StageBlock. -type BlockBlobStageBlockResponse struct { - blockBlobClientStageBlockResponse -} - -func toBlockBlobStageBlockResponse(resp blockBlobClientStageBlockResponse) BlockBlobStageBlockResponse { - return BlockBlobStageBlockResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobStageBlockFromURLOptions provides set of configurations for StageBlockFromURL operation -type BlockBlobStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
- CopySourceAuthorization *string - - LeaseAccessConditions *LeaseAccessConditions - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentCRC64 []byte - - Offset *int64 - - Count *int64 - - CpkInfo *CpkInfo - - CpkScopeInfo *CpkScopeInfo -} - -func (o *BlockBlobStageBlockFromURLOptions) format() (*blockBlobClientStageBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - options := &blockBlobClientStageBlockFromURLOptions{ - CopySourceAuthorization: o.CopySourceAuthorization, - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, - SourceRange: getSourceRange(o.Offset, o.Count), - } - - return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions -} - -// BlockBlobStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. -type BlockBlobStageBlockFromURLResponse struct { - blockBlobClientStageBlockFromURLResponse -} - -func toBlockBlobStageBlockFromURLResponse(resp blockBlobClientStageBlockFromURLResponse) BlockBlobStageBlockFromURLResponse { - return BlockBlobStageBlockFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobCommitBlockListOptions provides set of configurations for CommitBlockList operation -type BlockBlobCommitBlockListOptions struct { - BlobTagsMap map[string]string - Metadata map[string]string - RequestID *string - Tier *AccessTier - Timeout *int32 - TransactionalContentCRC64 []byte - TransactionalContentMD5 []byte - BlobHTTPHeaders *BlobHTTPHeaders - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobCommitBlockListOptions) format() (*blockBlobClientCommitBlockListOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &blockBlobClientCommitBlockListOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), - Metadata: o.Metadata, - RequestID: o.RequestID, - Tier: o.Tier, - Timeout: o.Timeout, - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.BlobHTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions -} - -// BlockBlobCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. 
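Reviewer note: the StageBlock/CommitBlockList option structs above configure a two-phase upload — each chunk is staged under a client-chosen base64 block ID, and nothing becomes readable until CommitBlockList publishes the ordered ID list. A hedged sketch of that flow against the pre-refactor client (`bbClient`, `ctx`, and `chunks` are assumed; the equal-length block-ID requirement is a Blob service rule, not shown in the deleted code):

```go
import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// uploadInBlocks stages every chunk under a fixed-width, base64-encoded
// block ID, then commits the ID list so the blob becomes readable.
func uploadInBlocks(ctx context.Context, bbClient *azblob.BlockBlobClient, chunks [][]byte) error {
	var ids []string
	for i, chunk := range chunks {
		// Block IDs within one blob must all decode to the same length,
		// hence the zero-padded counter.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", i)))
		if _, err := bbClient.StageBlock(ctx, id, streaming.NopCloser(bytes.NewReader(chunk)), nil); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	_, err := bbClient.CommitBlockList(ctx, ids, nil)
	return err
}
```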
-type BlockBlobCommitBlockListResponse struct { - blockBlobClientCommitBlockListResponse -} - -func toBlockBlobCommitBlockListResponse(resp blockBlobClientCommitBlockListResponse) BlockBlobCommitBlockListResponse { - return BlockBlobCommitBlockListResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobGetBlockListOptions provides set of configurations for GetBlockList operation -type BlockBlobGetBlockListOptions struct { - Snapshot *string - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobGetBlockListOptions) format() (*blockBlobClientGetBlockListOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return &blockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions -} - -// BlockBlobGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. -type BlockBlobGetBlockListResponse struct { - blockBlobClientGetBlockListResponse -} - -func toBlockBlobGetBlockListResponse(resp blockBlobClientGetBlockListResponse) BlockBlobGetBlockListResponse { - return BlockBlobGetBlockListResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BlockBlobCopyFromURLOptions provides set of configurations for CopyBlockBlobFromURL operation -type BlockBlobCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsMap map[string]string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *BlobImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. 
- Tier *AccessTier - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - BlobAccessConditions *BlobAccessConditions -} - -func (o *BlockBlobCopyFromURLOptions) format() (*blobClientCopyFromURLOptions, *SourceModifiedAccessConditions, *ModifiedAccessConditions, *LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - options := &blobClientCopyFromURLOptions{ - BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), - CopySourceAuthorization: o.CopySourceAuthorization, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - Metadata: o.Metadata, - SourceContentMD5: o.SourceContentMD5, - Tier: o.Tier, - } - - leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions -} - -// BlockBlobCopyFromURLResponse contains the response from method BlockBlobClient.CopyFromURL. -type BlockBlobCopyFromURLResponse struct { - blobClientCopyFromURLResponse -} - -func toBlockBlobCopyFromURLResponse(resp blobClientCopyFromURLResponse) BlockBlobCopyFromURLResponse { - return BlockBlobCopyFromURLResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go deleted file mode 100644 index 657a767dd..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// ClientOptions adds additional client options while constructing connection -type ClientOptions struct { - // Logging configures the built-in logging policy. - Logging policy.LogOptions - - // Retry configures the built-in retry policy. - Retry policy.RetryOptions - - // Telemetry configures the built-in telemetry policy. - Telemetry policy.TelemetryOptions - - // Transport sets the transport for HTTP requests. - Transport policy.Transporter - - // PerCallPolicies contains custom policies to inject into the pipeline. - // Each policy is executed once per request. - PerCallPolicies []policy.Policy - - // PerRetryPolicies contains custom policies to inject into the pipeline. - // Each policy is executed once per request, and for each retry of that request. 
- PerRetryPolicies []policy.Policy -} - -func (c *ClientOptions) toPolicyOptions() *azcore.ClientOptions { - return &azcore.ClientOptions{ - Logging: c.Logging, - Retry: c.Retry, - Telemetry: c.Telemetry, - Transport: c.Transport, - PerCallPolicies: c.PerCallPolicies, - PerRetryPolicies: c.PerRetryPolicies, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -func getConnectionOptions(options *ClientOptions) *policy.ClientOptions { - if options == nil { - options = &ClientOptions{} - } - return options.toPolicyOptions() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go deleted file mode 100644 index a33103e4b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go +++ /dev/null @@ -1,271 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerCreateOptions provides set of configurations for CreateContainer operation -type ContainerCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string - - // Optional. Specifies the encryption scope settings to set on the container. - CpkScope *ContainerCpkScopeInfo -} - -func (o *ContainerCreateOptions) format() (*containerClientCreateOptions, *ContainerCpkScopeInfo) { - if o == nil { - return nil, nil - } - - basicOptions := containerClientCreateOptions{ - Access: o.Access, - Metadata: o.Metadata, - } - - return &basicOptions, o.CpkScope -} - -// ContainerCreateResponse is wrapper around containerClientCreateResponse -type ContainerCreateResponse struct { - containerClientCreateResponse -} - -func toContainerCreateResponse(resp containerClientCreateResponse) ContainerCreateResponse { - return ContainerCreateResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerDeleteOptions provides set of configurations for DeleteContainer operation -type ContainerDeleteOptions struct { - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerDeleteOptions) format() (*containerClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// ContainerDeleteResponse contains the response from method ContainerClient.Delete. 
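Reviewer note: the deleted ClientOptions type is a field-for-field projection onto azcore.ClientOptions via toPolicyOptions, which is why the refactor could drop it in favor of azcore's type. A sketch of populating it — field names come from the struct above, while the concrete values are illustrative only, not SDK defaults:

```go
import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// opts feeds the built-in retry and logging policies; every field maps 1:1
// onto azcore.ClientOptions through the deleted toPolicyOptions conversion.
var opts = azblob.ClientOptions{
	Retry: policy.RetryOptions{
		MaxRetries:    5,
		TryTimeout:    time.Minute,
		RetryDelay:    2 * time.Second,
		MaxRetryDelay: 30 * time.Second,
	},
	Logging: policy.LogOptions{IncludeBody: false},
}
```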
-type ContainerDeleteResponse struct { - containerClientDeleteResponse -} - -func toContainerDeleteResponse(resp containerClientDeleteResponse) ContainerDeleteResponse { - return ContainerDeleteResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerGetPropertiesOptions provides set of configurations for GetPropertiesContainer operation -type ContainerGetPropertiesOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *ContainerGetPropertiesOptions) format() (*containerClientGetPropertiesOptions, *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.LeaseAccessConditions -} - -// ContainerGetPropertiesResponse contains the response from method ContainerClient.GetProperties -type ContainerGetPropertiesResponse struct { - containerClientGetPropertiesResponse -} - -func toContainerGetPropertiesResponse(resp containerClientGetPropertiesResponse) ContainerGetPropertiesResponse { - return ContainerGetPropertiesResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerSetMetadataOptions provides set of configurations for SetMetadataContainer operation -type ContainerSetMetadataOptions struct { - Metadata map[string]string - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerSetMetadataOptions) format() (*containerClientSetMetadataOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return &containerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// ContainerSetMetadataResponse contains the response from method containerClient.SetMetadata -type ContainerSetMetadataResponse struct { - containerClientSetMetadataResponse -} - -func toContainerSetMetadataResponse(resp containerClientSetMetadataResponse) ContainerSetMetadataResponse { - return ContainerSetMetadataResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerGetAccessPolicyOptions provides set of configurations for GetAccessPolicy operation -type ContainerGetAccessPolicyOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *ContainerGetAccessPolicyOptions) format() (*containerClientGetAccessPolicyOptions, *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.LeaseAccessConditions -} - -// ContainerGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. 
-type ContainerGetAccessPolicyResponse struct { - containerClientGetAccessPolicyResponse -} - -func toContainerGetAccessPolicyResponse(resp containerClientGetAccessPolicyResponse) ContainerGetAccessPolicyResponse { - return ContainerGetAccessPolicyResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerSetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation -type ContainerSetAccessPolicyOptions struct { - AccessConditions *ContainerAccessConditions - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // the acls for the container - ContainerACL []*SignedIdentifier - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -func (o *ContainerSetAccessPolicyOptions) format() (*containerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - mac, lac := o.AccessConditions.format() - return &containerClientSetAccessPolicyOptions{ - Access: o.Access, - ContainerACL: o.ContainerACL, - RequestID: o.RequestID, - }, lac, mac -} - -// ContainerSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy -type ContainerSetAccessPolicyResponse struct { - containerClientSetAccessPolicyResponse -} - -func toContainerSetAccessPolicyResponse(resp containerClientSetAccessPolicyResponse) ContainerSetAccessPolicyResponse { - return ContainerSetAccessPolicyResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerListBlobsFlatOptions provides set of configurations for SetAccessPolicy operation -type ContainerListBlobsFlatOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - MaxResults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. 
- Prefix *string -} - -func (o *ContainerListBlobsFlatOptions) format() *containerClientListBlobFlatSegmentOptions { - if o == nil { - return nil - } - - return &containerClientListBlobFlatSegmentOptions{ - Include: o.Include, - Marker: o.Marker, - Maxresults: o.MaxResults, - Prefix: o.Prefix, - } -} - -// ContainerListBlobFlatPager provides operations for iterating over paged responses -type ContainerListBlobFlatPager struct { - *containerClientListBlobFlatSegmentPager -} - -func toContainerListBlobFlatSegmentPager(resp *containerClientListBlobFlatSegmentPager) *ContainerListBlobFlatPager { - return &ContainerListBlobFlatPager{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -//ContainerListBlobsHierarchyOptions provides set of configurations for ContainerClient.ListBlobsHierarchy -type ContainerListBlobsHierarchyOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - MaxResults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string -} - -func (o *ContainerListBlobsHierarchyOptions) format() *containerClientListBlobHierarchySegmentOptions { - if o == nil { - return nil - } - - return &containerClientListBlobHierarchySegmentOptions{ - Include: o.Include, - Marker: o.Marker, - Maxresults: o.MaxResults, - Prefix: o.Prefix, - } -} - -// ContainerListBlobHierarchyPager provides operations for iterating over paged responses. -type ContainerListBlobHierarchyPager struct { - containerClientListBlobHierarchySegmentPager -} - -func toContainerListBlobHierarchySegmentPager(resp *containerClientListBlobHierarchySegmentPager) *ContainerListBlobHierarchyPager { - if resp == nil { - return nil - } - return &ContainerListBlobHierarchyPager{*resp} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go deleted file mode 100644 index 87572e917..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go +++ /dev/null @@ -1,166 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -// --------------------------------------------------------------------------------------------------------------------- - -// LeaseBreakNaturally tells ContainerClient's or BlobClient's BreakLease method to break the lease using service semantics. -const LeaseBreakNaturally = -1 - -func leasePeriodPointer(period int32) *int32 { - if period != LeaseBreakNaturally { - return &period - } else { - return nil - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerAcquireLeaseOptions provides set of configurations for AcquireLeaseContainer operation -type ContainerAcquireLeaseOptions struct { - Duration *int32 - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerAcquireLeaseOptions) format() (containerClientAcquireLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return containerClientAcquireLeaseOptions{}, nil - } - containerAcquireLeaseOptions := containerClientAcquireLeaseOptions{ - Duration: o.Duration, - } - - return containerAcquireLeaseOptions, o.ModifiedAccessConditions -} - -// ContainerAcquireLeaseResponse contains the response from method ContainerLeaseClient.AcquireLease. -type ContainerAcquireLeaseResponse struct { - containerClientAcquireLeaseResponse -} - -func toContainerAcquireLeaseResponse(resp containerClientAcquireLeaseResponse) ContainerAcquireLeaseResponse { - return ContainerAcquireLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerBreakLeaseOptions provides set of configurations for BreakLeaseContainer operation -type ContainerBreakLeaseOptions struct { - BreakPeriod *int32 - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerBreakLeaseOptions) format() (*containerClientBreakLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - containerBreakLeaseOptions := &containerClientBreakLeaseOptions{ - BreakPeriod: o.BreakPeriod, - } - - return containerBreakLeaseOptions, o.ModifiedAccessConditions -} - -// ContainerBreakLeaseResponse contains the response from method ContainerLeaseClient.BreakLease. -type ContainerBreakLeaseResponse struct { - containerClientBreakLeaseResponse -} - -func toContainerBreakLeaseResponse(resp containerClientBreakLeaseResponse) ContainerBreakLeaseResponse { - return ContainerBreakLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerChangeLeaseOptions provides set of configurations for ChangeLeaseContainer operation -type ContainerChangeLeaseOptions struct { - ProposedLeaseID *string - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerChangeLeaseOptions) format() (*string, *containerClientChangeLeaseOptions, *ModifiedAccessConditions, error) { - generatedUuid, err := uuid.New() - if err != nil { - return nil, nil, nil, err - } - leaseID := to.Ptr(generatedUuid.String()) - if o == nil { - return leaseID, nil, nil, err - } - - if o.ProposedLeaseID == nil { - o.ProposedLeaseID = leaseID - } - - return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, err -} - -// ContainerChangeLeaseResponse contains the response from method ContainerLeaseClient.ChangeLease. 
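Reviewer note: both ChangeLease format methods (blob and container) share a quirk worth preserving in any migration: when the caller leaves ProposedLeaseID nil, a freshly generated UUID silently becomes the proposed ID and is written back into the options. A standalone sketch of that fallback, using github.com/google/uuid as a stand-in for the SDK's internal uuid package (an assumption — the SDK vendors its own):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// proposedOrRandom mirrors the fallback in the deleted format methods:
// honor an explicit proposed lease ID, otherwise mint a random UUID.
func proposedOrRandom(proposed *string) (string, error) {
	if proposed != nil {
		return *proposed, nil
	}
	id, err := uuid.NewRandom()
	if err != nil {
		return "", err
	}
	return id.String(), nil
}

func main() {
	id, _ := proposedOrRandom(nil)
	fmt.Println("generated lease ID:", id)
}
```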
-type ContainerChangeLeaseResponse struct { - containerClientChangeLeaseResponse -} - -func toContainerChangeLeaseResponse(resp containerClientChangeLeaseResponse) ContainerChangeLeaseResponse { - return ContainerChangeLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerReleaseLeaseOptions provides set of configurations for ReleaseLeaseContainer operation -type ContainerReleaseLeaseOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerReleaseLeaseOptions) format() (*containerClientReleaseLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// ContainerReleaseLeaseResponse contains the response from method ContainerLeaseClient.ReleaseLease. -type ContainerReleaseLeaseResponse struct { - containerClientReleaseLeaseResponse -} - -func toContainerReleaseLeaseResponse(resp containerClientReleaseLeaseResponse) ContainerReleaseLeaseResponse { - return ContainerReleaseLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ContainerRenewLeaseOptions provides set of configurations for RenewLeaseContainer operation -type ContainerRenewLeaseOptions struct { - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *ContainerRenewLeaseOptions) format() (*containerClientRenewLeaseOptions, *ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// ContainerRenewLeaseResponse contains the response from method ContainerLeaseClient.RenewLease. -type ContainerRenewLeaseResponse struct { - containerClientRenewLeaseResponse -} - -func toContainerRenewLeaseResponse(resp containerClientRenewLeaseResponse) ContainerRenewLeaseResponse { - return ContainerRenewLeaseResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go deleted file mode 100644 index c7a67abe7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go +++ /dev/null @@ -1,201 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "context" - "fmt" -) - -const _1MiB = 1024 * 1024 - -// UploadOption identifies options used by the UploadBuffer and UploadFile functions. -type UploadOption struct { - // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. - // Note that the progress reporting is not always increasing; it can go down when retrying a request. - Progress func(bytesTransferred int64) - - // HTTPHeaders indicates the HTTP headers to be associated with the blob. - HTTPHeaders *BlobHTTPHeaders - - // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. - Metadata map[string]string - - // BlobAccessConditions indicates the access conditions for the block blob. 
- BlobAccessConditions *BlobAccessConditions
-
- // AccessTier indicates the tier of the blob.
- AccessTier *AccessTier
-
- // TagsMap
- TagsMap map[string]string
-
- // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
-
- // Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
- Parallelism uint16
- // Optional header. Specifies the transactional CRC64 for the body, to be validated by the service.
- TransactionalContentCRC64 *[]byte
- // Specifies the transactional MD5 for the body, to be validated by the service.
- TransactionalContentMD5 *[]byte
-}
-
-func (o *UploadOption) getStageBlockOptions() *BlockBlobStageBlockOptions {
- leaseAccessConditions, _ := o.BlobAccessConditions.format()
- return &BlockBlobStageBlockOptions{
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- LeaseAccessConditions: leaseAccessConditions,
- }
-}
-
-func (o *UploadOption) getUploadBlockBlobOptions() *BlockBlobUploadOptions {
- return &BlockBlobUploadOptions{
- TagsMap: o.TagsMap,
- Metadata: o.Metadata,
- Tier: o.AccessTier,
- HTTPHeaders: o.HTTPHeaders,
- BlobAccessConditions: o.BlobAccessConditions,
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- }
-}
-
-func (o *UploadOption) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions {
- return &BlockBlobCommitBlockListOptions{
- BlobTagsMap: o.TagsMap,
- Metadata: o.Metadata,
- Tier: o.AccessTier,
- BlobHTTPHeaders: o.HTTPHeaders,
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- }
-}
-
-// ---------------------------------------------------------------------------------------------------------------------
-
-// UploadStreamOptions provides set of configurations for UploadStream operation
-type UploadStreamOptions struct {
- // TransferManager provides a TransferManager that controls buffer allocation/reuse and
- // concurrency. This overrides BufferSize and MaxBuffers if set.
- TransferManager TransferManager
- transferMangerNotSet bool
- // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
- BufferSize int
- // MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
- MaxBuffers int - HTTPHeaders *BlobHTTPHeaders - Metadata map[string]string - BlobAccessConditions *BlobAccessConditions - AccessTier *AccessTier - BlobTagsMap map[string]string - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo -} - -func (u *UploadStreamOptions) defaults() error { - if u.TransferManager != nil { - return nil - } - - if u.MaxBuffers == 0 { - u.MaxBuffers = 1 - } - - if u.BufferSize < _1MiB { - u.BufferSize = _1MiB - } - - var err error - u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers) - if err != nil { - return fmt.Errorf("bug: default transfer manager could not be created: %s", err) - } - u.transferMangerNotSet = true - return nil -} - -func (u *UploadStreamOptions) getStageBlockOptions() *BlockBlobStageBlockOptions { - leaseAccessConditions, _ := u.BlobAccessConditions.format() - return &BlockBlobStageBlockOptions{ - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, - LeaseAccessConditions: leaseAccessConditions, - } -} - -func (u *UploadStreamOptions) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions { - options := &BlockBlobCommitBlockListOptions{ - BlobTagsMap: u.BlobTagsMap, - Metadata: u.Metadata, - Tier: u.AccessTier, - BlobHTTPHeaders: u.HTTPHeaders, - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, - BlobAccessConditions: u.BlobAccessConditions, - } - - return options -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DownloadOptions identifies options used by the DownloadToBuffer and DownloadToFile functions. -type DownloadOptions struct { - // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are received. - Progress func(bytesTransferred int64) - - // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. - BlobAccessConditions *BlobAccessConditions - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo - - // Parallelism indicates the maximum number of blocks to download in parallel (0=default) - Parallelism uint16 - - // RetryReaderOptionsPerBlock is used when downloading each block. - RetryReaderOptionsPerBlock RetryReaderOptions -} - -func (o *DownloadOptions) getBlobPropertiesOptions() *BlobGetPropertiesOptions { - return &BlobGetPropertiesOptions{ - BlobAccessConditions: o.BlobAccessConditions, - CpkInfo: o.CpkInfo, - } -} - -func (o *DownloadOptions) getDownloadBlobOptions(offSet, count int64, rangeGetContentMD5 *bool) *BlobDownloadOptions { - return &BlobDownloadOptions{ - BlobAccessConditions: o.BlobAccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, - Offset: &offSet, - Count: &count, - RangeGetContentMD5: rangeGetContentMD5, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BatchTransferOptions identifies options used by DoBatchTransfer. 
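Before BatchTransferOptions continues below, one note on the stream-upload defaults above: defaults() quietly rewrites the caller's options, turning a zero MaxBuffers into 1 and anything under 1 MiB into 1 MiB, and only then builds the static-buffer TransferManager. A self-contained sketch of that clamping (helper names here are hypothetical) makes the resulting pinned-memory budget explicit:

package main

import "fmt"

const oneMiB = 1024 * 1024

// clampStreamDefaults applies the same rules as UploadStreamOptions.defaults():
// MaxBuffers == 0 becomes 1, BufferSize < 1 MiB becomes 1 MiB. The product is
// the total memory the static-buffer TransferManager pins for the upload.
func clampStreamDefaults(bufferSize, maxBuffers int) (int, int) {
	if maxBuffers == 0 {
		maxBuffers = 1
	}
	if bufferSize < oneMiB {
		bufferSize = oneMiB
	}
	return bufferSize, maxBuffers
}

func main() {
	size, n := clampStreamDefaults(0, 0)
	fmt.Printf("defaults: %d buffer(s) x %d bytes = %d bytes pinned\n", n, size, n*size)

	size, n = clampStreamDefaults(4*oneMiB, 8)
	fmt.Printf("tuned:    %d buffer(s) x %d bytes = %d bytes pinned\n", n, size, n*size)
}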
-type BatchTransferOptions struct {
- TransferSize int64
- ChunkSize int64
- Parallelism uint16
- Operation func(offset int64, chunkSize int64, ctx context.Context) error
- OperationName string
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
deleted file mode 100644
index 3cf85ca43..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
+++ /dev/null
@@ -1,68 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
- "net/url"
- "strings"
-)
-
-func serializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
- if tagsMap == nil {
- return nil
- }
- tags := make([]string, 0)
- for key, val := range tagsMap {
- tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
- }
- //tags = tags[:len(tags)-1]
- blobTagsString := strings.Join(tags, "&")
- return &blobTagsString
-}
-
-func serializeBlobTags(tagsMap map[string]string) *BlobTags {
- if tagsMap == nil {
- return nil
- }
- blobTagSet := make([]*BlobTag, 0)
- for key, val := range tagsMap {
- newKey, newVal := key, val
- blobTagSet = append(blobTagSet, &BlobTag{Key: &newKey, Value: &newVal})
- }
- return &BlobTags{BlobTagSet: blobTagSet}
-}
-
-func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) {
- if policies == nil {
- return nil
- }
- // For source blobs (blobs that have policy ids and rule ids applied to them),
- // the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
- // The value of this header is the status of the replication.
- orPolicyStatusHeader := make(map[string]string)
- for key, value := range policies {
- if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" {
- orPolicyStatusHeader[key] = value
- }
- }
-
- parsedResult := make(map[string][]ObjectReplicationRules)
- for key, value := range orPolicyStatusHeader {
- policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_")
- policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1]
-
- parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value})
- }
-
- for policyId, rules := range parsedResult {
- objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{
- PolicyId: &policyId,
- Rules: &rules,
- })
- }
- return
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
deleted file mode 100644
index 747a94ee2..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
+++ /dev/null
@@ -1,226 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
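Before the service-client helpers, a note on deserializeORSPolicies above: it packs two identifiers into each header name, and the "x-ms-or-<policy_id>_<rule_id>" convention is easy to get wrong. The same split logic, runnable over a fabricated header map (the ids are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Fabricated response headers in the shape the service uses for
	// object-replication status on source blobs.
	headers := map[string]string{
		"x-ms-or-policy-id":      "5fa8f8de", // ignored: not a status header
		"x-ms-or-5fa8f8de_rule1": "Complete",
		"x-ms-or-5fa8f8de_rule2": "Failed",
	}

	for key, status := range headers {
		if !strings.Contains(key, "or-") || key == "x-ms-or-policy-id" {
			continue
		}
		// "x-ms-or-<policy_id>_<rule_id>": strip everything through "or-",
		// then split the remainder at the underscore.
		ids := strings.Split(strings.Split(key, "or-")[1], "_")
		fmt.Printf("policy=%s rule=%s status=%s\n", ids[0], ids[1], status)
	}
}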
-
-package azblob
-
-// ---------------------------------------------------------------------------------------------------------------------
-
-// ServiceGetAccountInfoOptions provides set of options for ServiceClient.GetAccountInfo
-type ServiceGetAccountInfoOptions struct {
- // placeholder for future options
-}
-
-func (o *ServiceGetAccountInfoOptions) format() *serviceClientGetAccountInfoOptions {
- return nil
-}
-
-// ServiceGetAccountInfoResponse contains the response from ServiceClient.GetAccountInfo
-type ServiceGetAccountInfoResponse struct {
- serviceClientGetAccountInfoResponse
-}
-
-func toServiceGetAccountInfoResponse(resp serviceClientGetAccountInfoResponse) ServiceGetAccountInfoResponse {
- return ServiceGetAccountInfoResponse{resp}
-}
-
-// ---------------------------------------------------------------------------------------------------------------------
-
-// ListContainersDetail indicates what additional information the service should return with each container.
-type ListContainersDetail struct {
- // Tells the service whether to return metadata for each container.
- Metadata bool
-
- // Tells the service whether to return soft-deleted containers.
- Deleted bool
-}
-
-// format produces the `Include` query parameter's value.
-func (o *ListContainersDetail) format() []ListContainersIncludeType {
- if !o.Metadata && !o.Deleted {
- return nil
- }
-
- items := make([]ListContainersIncludeType, 0, 2)
- // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
- if o.Deleted {
- items = append(items, ListContainersIncludeTypeDeleted)
- }
- if o.Metadata {
- items = append(items, ListContainersIncludeTypeMetadata)
- }
- return items
-}
-
-// ListContainersOptions provides set of configurations for ListContainers operation
-type ListContainersOptions struct {
- Include ListContainersDetail
-
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
- // operation returns the NextMarker value within the response body if the listing operation did not return all containers
- // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in
- // a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
-
- // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value
- // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary,
- // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible
- // that the service will return fewer results than specified by max results, or than the default of 5000.
- MaxResults *int32
-
- // Filters the results to return only containers whose name begins with the specified prefix.
- Prefix *string
-}
-
-func (o *ListContainersOptions) format() *serviceClientListContainersSegmentOptions {
- if o == nil {
- return nil
- }
-
- return &serviceClientListContainersSegmentOptions{
- Include: o.Include.format(),
- Marker: o.Marker,
- Maxresults: o.MaxResults,
- Prefix: o.Prefix,
- }
-}
-
-// ServiceListContainersSegmentPager provides operations for iterating over paged responses.
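The alphabetical-order warning in format() above is load-bearing: the include flags participate in the SharedKey string-to-sign, so emitting metadata before deleted would surface as an authentication failure rather than an odd query string. A standalone mirror of that ordering rule, with ListContainersIncludeType standing in for the generated enum:

package main

import "fmt"

type ListContainersIncludeType string

const (
	ListContainersIncludeTypeDeleted  ListContainersIncludeType = "deleted"
	ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata"
)

// formatInclude mirrors ListContainersDetail.format(): flags are appended
// in alphabetical order ("deleted" before "metadata") because the include
// list is signed along with the rest of the request.
func formatInclude(metadata, deleted bool) []ListContainersIncludeType {
	if !metadata && !deleted {
		return nil
	}
	items := make([]ListContainersIncludeType, 0, 2)
	if deleted {
		items = append(items, ListContainersIncludeTypeDeleted)
	}
	if metadata {
		items = append(items, ListContainersIncludeTypeMetadata)
	}
	return items
}

func main() {
	fmt.Println(formatInclude(true, true)) // [deleted metadata]: order matters
}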
-type ServiceListContainersSegmentPager struct { - serviceClientListContainersSegmentPager -} - -func toServiceListContainersSegmentPager(resp serviceClientListContainersSegmentPager) *ServiceListContainersSegmentPager { - return &ServiceListContainersSegmentPager{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceGetPropertiesOptions provides set of options for ServiceClient.GetProperties -type ServiceGetPropertiesOptions struct { - // placeholder for future options -} - -func (o *ServiceGetPropertiesOptions) format() *serviceClientGetPropertiesOptions { - return nil -} - -// ServiceGetPropertiesResponse contains the response from ServiceClient.GetProperties -type ServiceGetPropertiesResponse struct { - serviceClientGetPropertiesResponse -} - -func toServiceGetPropertiesResponse(resp serviceClientGetPropertiesResponse) ServiceGetPropertiesResponse { - return ServiceGetPropertiesResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceSetPropertiesOptions provides set of options for ServiceClient.SetProperties -type ServiceSetPropertiesOptions struct { - // The set of CORS rules. - Cors []*CorsRule - - // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible - // values include version 2008-10-27 and all more recent versions - DefaultServiceVersion *string - - // the retention policy which determines how long the associated data should persist - DeleteRetentionPolicy *RetentionPolicy - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - HourMetrics *Metrics - - // Azure Analytics Logging settings. - Logging *Logging - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - MinuteMetrics *Metrics - - // The properties that enable an account to host a static website - StaticWebsite *StaticWebsite -} - -func (o *ServiceSetPropertiesOptions) format() (StorageServiceProperties, *serviceClientSetPropertiesOptions) { - if o == nil { - return StorageServiceProperties{}, nil - } - - return StorageServiceProperties{ - Cors: o.Cors, - DefaultServiceVersion: o.DefaultServiceVersion, - DeleteRetentionPolicy: o.DeleteRetentionPolicy, - HourMetrics: o.HourMetrics, - Logging: o.Logging, - MinuteMetrics: o.MinuteMetrics, - StaticWebsite: o.StaticWebsite, - }, nil -} - -// ServiceSetPropertiesResponse contains the response from ServiceClient.SetProperties -type ServiceSetPropertiesResponse struct { - serviceClientSetPropertiesResponse -} - -func toServiceSetPropertiesResponse(resp serviceClientSetPropertiesResponse) ServiceSetPropertiesResponse { - return ServiceSetPropertiesResponse{resp} -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ServiceGetStatisticsOptions provides set of options for ServiceClient.GetStatistics -type ServiceGetStatisticsOptions struct { - // placeholder for future options -} - -func (o *ServiceGetStatisticsOptions) format() *serviceClientGetStatisticsOptions { - return nil -} - -// ServiceGetStatisticsResponse contains the response from ServiceClient.GetStatistics. 
-type ServiceGetStatisticsResponse struct {
- serviceClientGetStatisticsResponse
-}
-
-func toServiceGetStatisticsResponse(resp serviceClientGetStatisticsResponse) ServiceGetStatisticsResponse {
- return ServiceGetStatisticsResponse{resp}
-}
-
-// ---------------------------------------------------------------------------------------------------------------------
-
-// ServiceFilterBlobsOptions provides set of configurations for ServiceClient.FindBlobsByTags
-type ServiceFilterBlobsOptions struct {
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
- // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
- // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
- // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
- // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
- // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
- // than the default of 5000.
- MaxResults *int32
- // Filters the results to return only blobs whose tags match the specified expression.
- Where *string
-}
-
-func (o *ServiceFilterBlobsOptions) pointer() *serviceClientFilterBlobsOptions {
- if o == nil {
- return nil
- }
- return &serviceClientFilterBlobsOptions{
- Marker: o.Marker,
- Maxresults: o.MaxResults,
- Where: o.Where,
- }
-}
-
-// ServiceFilterBlobsResponse contains the response from ServiceClient.FindBlobsByTags
-type ServiceFilterBlobsResponse struct {
- serviceClientFilterBlobsResponse
-}
-
-func toServiceFilterBlobsResponse(resp serviceClientFilterBlobsResponse) ServiceFilterBlobsResponse {
- return ServiceFilterBlobsResponse{resp}
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
deleted file mode 100644
index ca5aac8cd..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
+++ /dev/null
@@ -1,648 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-package azblob
-
-import (
- "context"
- "encoding/base64"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "io"
- "net/http"
- "strconv"
- "time"
-)
-
-type appendBlobClient struct {
- endpoint string
- pl runtime.Pipeline
-}
-
-// newAppendBlobClient creates a new instance of appendBlobClient with the specified values.
-// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
-// pl - the pipeline used for sending requests and handling responses.
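Back in the service-client utilities above, ServiceFilterBlobsOptions.Where carries the blob-index filter expression verbatim into the where= query parameter of the Find Blobs by Tags operation (comp=blobs at the account URL, per the REST API). The expression grammar belongs to the service, not Go; a quick sketch of composing one, with fabricated tag names and values:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Blob index tag filter: tag names are double-quoted, values single-quoted.
	where := `"project"='billing' AND "tier"='hot'`

	// The removed client dropped this expression into the where= query
	// parameter; url.Values handles the percent-escaping.
	q := url.Values{}
	q.Set("comp", "blobs")
	q.Set("where", where)
	fmt.Println("https://myaccount.blob.core.windows.net/?" + q.Encode())
}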
-func newAppendBlobClient(endpoint string, pl runtime.Pipeline) *appendBlobClient { - client := &appendBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client -} - -// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append -// Block operation is permitted only if the blob was created with x-ms-blob-type set to -// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// body - Initial data -// appendBlobClientAppendBlockOptions - appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock -// method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *appendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientAppendBlockResponse, error) { - req, err := client.appendBlockCreateRequest(ctx, contentLength, body, appendBlobClientAppendBlockOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return appendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) - } - return client.appendBlockHandleResponse(resp) -} - -// appendBlockCreateRequest creates the AppendBlock request. 
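Before the request builder below, it may help to see the wire shape it produces reduced to its essentials. A plain net/http sketch of the same PUT, with a fabricated endpoint and values; the real client builds this through the azcore pipeline, which also adds the Authorization header omitted here:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

func main() {
	body := strings.NewReader("log line 42\n")

	// comp=appendblock selects the Append Block operation on the blob URL.
	req, err := http.NewRequest(http.MethodPut,
		"https://myaccount.blob.core.windows.net/logs/app.log?comp=appendblock", body)
	if err != nil {
		panic(err)
	}

	req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
	// Optimistic concurrency: the service answers 412 if the blob's current
	// length is not exactly this offset when the append lands.
	req.Header.Set("x-ms-blob-condition-appendpos", "4096")
	// Cap the blob's total size; an append that would exceed it fails.
	req.Header.Set("x-ms-blob-condition-maxsize", strconv.Itoa(4*1024*1024))
	req.Header.Set("x-ms-version", "2020-10-02")
	req.Header.Set("Accept", "application/xml")

	fmt.Printf("%s %s\n", req.Method, req.URL)
}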
-func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "appendblock") - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentMD5)) - } - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentCRC64)) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - 
req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, req.SetBody(body, "application/octet-stream") -} - -// appendBlockHandleResponse handles the AppendBlock response. -func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (appendBlobClientAppendBlockResponse, error) { - result := appendBlobClientAppendBlockResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientAppendBlockResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where -// the contents are read from a source url. The Append Block operation is permitted only if the blob was -// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// If the operation fails it returns an *azcore.ResponseError type. -// sourceURL - Specify a URL to the copy source. -// contentLength - The length of the request. -// appendBlobClientAppendBlockFromURLOptions - appendBlobClientAppendBlockFromURLOptions contains the optional parameters -// for the appendBlobClient.AppendBlockFromURL method. 
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -func (client *appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (appendBlobClientAppendBlockFromURLResponse, error) { - req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, appendBlobClientAppendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return appendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) - } - return client.appendBlockFromURLHandleResponse(resp) -} - -// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
-func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "appendblock") - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockFromURLOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-copy-source", sourceURL) - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceRange != nil { - req.Raw().Header.Set("x-ms-source-range", *appendBlobClientAppendBlockFromURLOptions.SourceRange) - } - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentMD5)) - } - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64 != nil { - req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64)) - } - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5)) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != 
nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockFromURLOptions.RequestID) - } - if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
-func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (appendBlobClientAppendBlockFromURLResponse, error) { - result := appendBlobClientAppendBlockFromURLResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientAppendBlockFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - return result, nil -} - -// Create - The Create Append Blob operation creates a new append blob. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// appendBlobClientCreateOptions - appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
-func (client *appendBlobClient) Create(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, contentLength, appendBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return appendBlobClientCreateResponse{}, runtime.NewResponseError(resp) - } - return client.createHandleResponse(resp) -} - -// createCreateRequest creates the Create request. -func (client *appendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientCreateOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-blob-type", "AppendBlob") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Metadata != nil { - for k, v := range appendBlobClientCreateOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - 
req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientCreateOptions.RequestID) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *appendBlobClientCreateOptions.BlobTagsString) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", appendBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*appendBlobClientCreateOptions.ImmutabilityPolicyMode)) - } - if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*appendBlobClientCreateOptions.LegalHold)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// createHandleResponse handles the Create response. 
-func (client *appendBlobClient) createHandleResponse(resp *http.Response) (appendBlobClientCreateResponse, error) { - result := appendBlobClientCreateResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientCreateResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version -// or later. -// If the operation fails it returns an *azcore.ResponseError type. -// appendBlobClientSealOptions - appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock -// method. -func (client *appendBlobClient) Seal(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (appendBlobClientSealResponse, error) { - req, err := client.sealCreateRequest(ctx, appendBlobClientSealOptions, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) - if err != nil { - return appendBlobClientSealResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return appendBlobClientSealResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return appendBlobClientSealResponse{}, runtime.NewResponseError(resp) - } - return client.sealHandleResponse(resp) -} - -// sealCreateRequest creates the Seal request. 
-func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "seal") - if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientSealOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientSealOptions.RequestID) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// sealHandleResponse handles the Seal response. 
-func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (appendBlobClientSealResponse, error) { - result := appendBlobClientSealResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return appendBlobClientSealResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { - isSealed, err := strconv.ParseBool(val) - if err != nil { - return appendBlobClientSealResponse{}, err - } - result.IsSealed = &isSealed - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go deleted file mode 100644 index 3f78a28aa..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go +++ /dev/null @@ -1,953 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -type blockBlobClient struct { - endpoint string - pl runtime.Pipeline -} - -// newBlockBlobClient creates a new instance of blockBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func newBlockBlobClient(endpoint string, pl runtime.Pipeline) *blockBlobClient { - client := &blockBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client -} - -// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. -// In order to be written as part of a blob, a block must have been successfully written to the -// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that -// have changed, then committing the new and existing blocks together. You can do -// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit -// the most recently uploaded version of the block, whichever list it may -// belong to. -// If the operation fails it returns an *azcore.ResponseError type. -// blocks - Blob Blocks. -// blockBlobClientCommitBlockListOptions - blockBlobClientCommitBlockListOptions contains the optional parameters for the -// blockBlobClient.CommitBlockList method. 
-// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientCommitBlockListResponse, error) {
-	req, err := client.commitBlockListCreateRequest(ctx, blocks, blockBlobClientCommitBlockListOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
-	if err != nil {
-		return blockBlobClientCommitBlockListResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return blockBlobClientCommitBlockListResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return blockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.commitBlockListHandleResponse(resp)
-}
-
-// commitBlockListCreateRequest creates the CommitBlockList request.
-func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "blocklist")
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientCommitBlockListOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
-		req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
-		req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
-		req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
-		req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentMD5 != nil {
-		req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentMD5))
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentCRC64 != nil {
-		req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentCRC64))
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Metadata != nil {
-		for k, v := range blockBlobClientCommitBlockListOptions.Metadata {
-			req.Raw().Header.Set("x-ms-meta-"+k, v)
-		}
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
-		req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
-	}
-	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Tier != nil {
-		req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientCommitBlockListOptions.Tier))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientCommitBlockListOptions.RequestID)
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.BlobTagsString != nil {
-		req.Raw().Header.Set("x-ms-tags", *blockBlobClientCommitBlockListOptions.BlobTagsString)
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry != nil {
-		req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode != nil {
-		req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode))
-	}
-	if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.LegalHold != nil {
-		req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientCommitBlockListOptions.LegalHold))
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, runtime.MarshalAsXML(req, blocks)
-}
-
-// commitBlockListHandleResponse handles the CommitBlockList response.
-func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response) (blockBlobClientCommitBlockListResponse, error) {
-	result := blockBlobClientCommitBlockListResponse{RawResponse: resp}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientCommitBlockListResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientCommitBlockListResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientCommitBlockListResponse{}, err
-		}
-		result.XMSContentCRC64 = xMSContentCRC64
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientCommitBlockListResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return blockBlobClientCommitBlockListResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	return result, nil
-}
-
-// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
-// If the operation fails it returns an *azcore.ResponseError type.
-// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
-// blockBlobClientGetBlockListOptions - blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList
-// method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientGetBlockListResponse, error) {
-	req, err := client.getBlockListCreateRequest(ctx, listType, blockBlobClientGetBlockListOptions, leaseAccessConditions, modifiedAccessConditions)
-	if err != nil {
-		return blockBlobClientGetBlockListResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return blockBlobClientGetBlockListResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return blockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.getBlockListHandleResponse(resp)
-}
-
-// getBlockListCreateRequest creates the GetBlockList request.
-func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "blocklist")
-	if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Snapshot != nil {
-		reqQP.Set("snapshot", *blockBlobClientGetBlockListOptions.Snapshot)
-	}
-	reqQP.Set("blocklisttype", string(listType))
-	if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientGetBlockListOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientGetBlockListOptions.RequestID)
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, nil
-}
-
-// getBlockListHandleResponse handles the GetBlockList response.
-func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (blockBlobClientGetBlockListResponse, error) {
-	result := blockBlobClientGetBlockListResponse{RawResponse: resp}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientGetBlockListResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
-	}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
-	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
-		blobContentLength, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			return blockBlobClientGetBlockListResponse{}, err
-		}
-		result.BlobContentLength = &blobContentLength
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientGetBlockListResponse{}, err
-		}
-		result.Date = &date
-	}
-	if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil {
-		return blockBlobClientGetBlockListResponse{}, err
-	}
-	return result, nil
-}
-
-// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from
-// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not
-// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform
-// partial updates to a block blob’s contents using a source URL, use the Put
-// Block from URL API in conjunction with Put Block List.
-// If the operation fails it returns an *azcore.ResponseError type.
-// contentLength - The length of the request.
-// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
-// a page blob snapshot. The value should be URL-encoded as it would appear in a request
-// URI. The source blob must either be public or must be authenticated via a shared access signature.
-// blockBlobClientPutBlobFromURLOptions - blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL
-// method.
-// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
-// method.
-func (client *blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientPutBlobFromURLResponse, error) {
-	req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions)
-	if err != nil {
-		return blockBlobClientPutBlobFromURLResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return blockBlobClientPutBlobFromURLResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return blockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.putBlobFromURLHandleResponse(resp)
-}
-
-// putBlobFromURLCreateRequest creates the PutBlobFromURL request.
-func (client *blockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientPutBlobFromURLOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-blob-type", "BlockBlob")
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5 != nil {
-		req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5))
-	}
-	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
-		req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
-		req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
-		req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
-		req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
-	}
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Metadata != nil {
-		for k, v := range blockBlobClientPutBlobFromURLOptions.Metadata {
-			req.Raw().Header.Set("x-ms-meta-"+k, v)
-		}
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
-		req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
-	}
-	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
-	}
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Tier != nil {
-		req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientPutBlobFromURLOptions.Tier))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
-		req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
-		req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
-		req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
-		req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
-		req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientPutBlobFromURLOptions.RequestID)
-	}
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.SourceContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.SourceContentMD5))
-	}
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.BlobTagsString != nil {
-		req.Raw().Header.Set("x-ms-tags", *blockBlobClientPutBlobFromURLOptions.BlobTagsString)
-	}
-	req.Raw().Header.Set("x-ms-copy-source", copySource)
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties != nil {
-		req.Raw().Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties))
-	}
-	if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization != nil {
-		req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization)
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, nil
-}
-
-// putBlobFromURLHandleResponse handles the PutBlobFromURL response.
-func (client *blockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (blockBlobClientPutBlobFromURLResponse, error) {
-	result := blockBlobClientPutBlobFromURLResponse{RawResponse: resp}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return blockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	return result, nil
-}
-
-// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
-// If the operation fails it returns an *azcore.ResponseError type.
-// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
-// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
-// parameter must be the same size for each block.
-// contentLength - The length of the request.
-// body - Initial data
-// blockBlobClientStageBlockOptions - blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock
-// method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-func (client *blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (blockBlobClientStageBlockResponse, error) {
-	req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, blockBlobClientStageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
-	if err != nil {
-		return blockBlobClientStageBlockResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return blockBlobClientStageBlockResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return blockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.stageBlockHandleResponse(resp)
-}
-
-// stageBlockCreateRequest creates the StageBlock request.
-func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "block")
-	reqQP.Set("blockid", blockID)
-	if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
-	if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentMD5 != nil {
-		req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentMD5))
-	}
-	if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentCRC64 != nil {
-		req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentCRC64))
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
-	}
-	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockOptions.RequestID)
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, req.SetBody(body, "application/octet-stream")
-}
-
-// stageBlockHandleResponse handles the StageBlock response.
-func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (blockBlobClientStageBlockResponse, error) {
-	result := blockBlobClientStageBlockResponse{RawResponse: resp}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientStageBlockResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientStageBlockResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientStageBlockResponse{}, err
-		}
-		result.XMSContentCRC64 = xMSContentCRC64
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return blockBlobClientStageBlockResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	return result, nil
-}
-
-// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents
-// are read from a URL.
-// If the operation fails it returns an *azcore.ResponseError type.
-// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
-// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
-// parameter must be the same size for each block.
-// contentLength - The length of the request.
-// sourceURL - Specify a URL to the copy source.
-// blockBlobClientStageBlockFromURLOptions - blockBlobClientStageBlockFromURLOptions contains the optional parameters for
-// the blockBlobClient.StageBlockFromURL method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
-// method.
-func (client *blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientStageBlockFromURLResponse, error) {
-	req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, blockBlobClientStageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
-	if err != nil {
-		return blockBlobClientStageBlockFromURLResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return blockBlobClientStageBlockFromURLResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return blockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.stageBlockFromURLHandleResponse(resp)
-}
-
-// stageBlockFromURLCreateRequest creates the StageBlockFromURL request.
-func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "block")
-	reqQP.Set("blockid", blockID)
-	if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockFromURLOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
-	req.Raw().Header.Set("x-ms-copy-source", sourceURL)
-	if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceRange != nil {
-		req.Raw().Header.Set("x-ms-source-range", *blockBlobClientStageBlockFromURLOptions.SourceRange)
-	}
-	if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentMD5))
-	}
-	if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentcrc64 != nil {
-		req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentcrc64))
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
-	}
-	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
-		req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
-		req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
-		req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
-		req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockFromURLOptions.RequestID)
-	}
-	if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization != nil {
-		req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization)
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, nil
-}
-
-// stageBlockFromURLHandleResponse handles the StageBlockFromURL response.
-func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (blockBlobClientStageBlockFromURLResponse, error) {
-	result := blockBlobClientStageBlockFromURLResponse{RawResponse: resp}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientStageBlockFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientStageBlockFromURLResponse{}, err
-		}
-		result.XMSContentCRC64 = xMSContentCRC64
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientStageBlockFromURLResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return blockBlobClientStageBlockFromURLResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	return result, nil
-}
-
-// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob
-// overwrites any existing metadata on the blob. Partial updates are not supported with Put
-// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of
-// the content of a block blob, use the Put Block List operation.
-// If the operation fails it returns an *azcore.ResponseError type.
-// contentLength - The length of the request.
-// body - Initial data
-// blockBlobClientUploadOptions - blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload
-// method.
-// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *blockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientUploadResponse, error) {
-	req, err := client.uploadCreateRequest(ctx, contentLength, body, blockBlobClientUploadOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
-	if err != nil {
-		return blockBlobClientUploadResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return blockBlobClientUploadResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return blockBlobClientUploadResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.uploadHandleResponse(resp)
-}
-
-// uploadCreateRequest creates the Upload request.
-func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientUploadOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-blob-type", "BlockBlob")
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.TransactionalContentMD5 != nil {
-		req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientUploadOptions.TransactionalContentMD5))
-	}
-	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
-		req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
-		req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
-		req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
-		req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
-		req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
-	}
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Metadata != nil {
-		for k, v := range blockBlobClientUploadOptions.Metadata {
-			req.Raw().Header.Set("x-ms-meta-"+k, v)
-		}
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
-		req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
-	}
-	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
-	}
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Tier != nil {
-		req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientUploadOptions.Tier))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientUploadOptions.RequestID)
-	}
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.BlobTagsString != nil {
-		req.Raw().Header.Set("x-ms-tags", *blockBlobClientUploadOptions.BlobTagsString)
-	}
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyExpiry != nil {
-		req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientUploadOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
-	}
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyMode != nil {
-		req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientUploadOptions.ImmutabilityPolicyMode))
-	}
-	if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.LegalHold != nil {
-		req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientUploadOptions.LegalHold))
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, req.SetBody(body, "application/octet-stream")
-}
-
-// uploadHandleResponse handles the Upload response.
-func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (blockBlobClientUploadResponse, error) {
-	result := blockBlobClientUploadResponse{RawResponse: resp}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientUploadResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return blockBlobClientUploadResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return blockBlobClientUploadResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return blockBlobClientUploadResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	return result, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go
deleted file mode 100644
index bad81201b..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go
+++ /dev/null
@@ -1,1247 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-package azblob
-
-import (
-	"context"
-	"encoding/base64"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-	"io"
-	"net/http"
-	"strconv"
-	"time"
-)
-
-type pageBlobClient struct {
-	endpoint string
-	pl       runtime.Pipeline
-}
-
-// newPageBlobClient creates a new instance of pageBlobClient with the specified values.
-// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
-// pl - the pipeline used for sending requests and handling responses.
-func newPageBlobClient(endpoint string, pl runtime.Pipeline) *pageBlobClient {
-	client := &pageBlobClient{
-		endpoint: endpoint,
-		pl:       pl,
-	}
-	return client
-}
-
-// ClearPages - The Clear Pages operation clears a set of pages from a page blob
-// If the operation fails it returns an *azcore.ResponseError type.
-// contentLength - The length of the request.
-// pageBlobClientClearPagesOptions - pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages
-// method.
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
-// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
-// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
-// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages
-// method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *pageBlobClient) ClearPages(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientClearPagesResponse, error) {
-	req, err := client.clearPagesCreateRequest(ctx, contentLength, pageBlobClientClearPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
-	if err != nil {
-		return pageBlobClientClearPagesResponse{}, err
-	}
-	resp, err := client.pl.Do(req)
-	if err != nil {
-		return pageBlobClientClearPagesResponse{}, err
-	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return pageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp)
-	}
-	return client.clearPagesHandleResponse(resp)
-}
-
-// clearPagesCreateRequest creates the ClearPages request.
-func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "page")
-	if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientClearPagesOptions.Timeout), 10))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header.Set("x-ms-page-write", "clear")
-	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
-	if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Range != nil {
-		req.Raw().Header.Set("x-ms-range", *pageBlobClientClearPagesOptions.Range)
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
-		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
-		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
-	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
-	}
-	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
-		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
-	}
-	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
-		req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10))
-	}
-	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
-		req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10))
-	}
-	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
-		req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
-	}
-	req.Raw().Header.Set("x-ms-version", "2020-10-02")
-	if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.RequestID != nil {
-		req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientClearPagesOptions.RequestID)
-	}
-	req.Raw().Header.Set("Accept", "application/xml")
-	return req, nil
-}
-
-// clearPagesHandleResponse handles the ClearPages response.
-func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (pageBlobClientClearPagesResponse, error) {
-	result := pageBlobClientClearPagesResponse{RawResponse: resp}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = &val
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return pageBlobClientClearPagesResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return pageBlobClientClearPagesResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return pageBlobClientClearPagesResponse{}, err
-		}
-		result.XMSContentCRC64 = xMSContentCRC64
-	}
-	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
-		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			return pageBlobClientClearPagesResponse{}, err
-		}
-		result.BlobSequenceNumber = &blobSequenceNumber
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return pageBlobClientClearPagesResponse{}, err
-		}
-		result.Date = &date
-	}
-	return result, nil
-}
-
-// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
-// The snapshot is copied such that only the differential changes between the previously copied
-// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can
-// be read or copied from as usual. This API is supported since REST version
-// 2016-05-31.
-// If the operation fails it returns an *azcore.ResponseError type.
-// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
-// a page blob snapshot. The value should be URL-encoded as it would appear in a request
-// URI. The source blob must either be public or must be authenticated via a shared access signature.
-// pageBlobClientCopyIncrementalOptions - pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental
-// method.
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
-func (client *pageBlobClient) CopyIncremental(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCopyIncrementalResponse, error) { - req, err := client.copyIncrementalCreateRequest(ctx, copySource, pageBlobClientCopyIncrementalOptions, modifiedAccessConditions) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return pageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) - } - return client.copyIncrementalHandleResponse(resp) -} - -// copyIncrementalCreateRequest creates the CopyIncremental request. -func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "incrementalcopy") - if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCopyIncrementalOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-copy-source", copySource) - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCopyIncrementalOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// copyIncrementalHandleResponse handles the CopyIncremental response. 
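// Usage sketch for CopyIncremental, assuming package-internal access; the
// snapshot URL is a placeholder. The call returns 202 Accepted and the copy
// completes asynchronously:
//
//    src := "https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=2022-01-01T00:00:00.0000000Z"
//    resp, err := client.CopyIncremental(ctx, src, nil, nil)
//    if err != nil {
//        return err
//    }
//    _ = resp.CopyStatus // "pending" until the service finishes the copy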
-func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (pageBlobClientCopyIncrementalResponse, error) { - result := pageBlobClientCopyIncrementalResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - return result, nil -} - -// Create - The Create operation creates a new page blob. -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// pageBlobClientCreateOptions - pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, pageBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientCreateResponse{}, runtime.NewResponseError(resp) - } - return client.createHandleResponse(resp) -} - -// createCreateRequest creates the Create request. 
-func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCreateOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-blob-type", "PageBlob") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Tier != nil { - req.Raw().Header.Set("x-ms-access-tier", string(*pageBlobClientCreateOptions.Tier)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Metadata != nil { - for k, v := range pageBlobClientCreateOptions.Metadata { - req.Raw().Header.Set("x-ms-meta-"+k, v) - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", 
*modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobSequenceNumber != nil { - req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientCreateOptions.BlobSequenceNumber, 10)) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCreateOptions.RequestID) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobTagsString != nil { - req.Raw().Header.Set("x-ms-tags", *pageBlobClientCreateOptions.BlobTagsString) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { - req.Raw().Header.Set("x-ms-immutability-policy-until-date", pageBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyMode != nil { - req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*pageBlobClientCreateOptions.ImmutabilityPolicyMode)) - } - if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.LegalHold != nil { - req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*pageBlobClientCreateOptions.LegalHold)) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// createHandleResponse handles the Create response. 
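// Usage sketch for Create: an empty 4 MiB page blob. blobContentLength must be
// 512-byte aligned; contentLength is 0 because the request carries no body.
// The nil arguments skip all optional parameter groups (illustrative only):
//
//    resp, err := client.Create(ctx, 0, 4*1024*1024, nil, nil, nil, nil, nil, nil)
//    if err != nil {
//        return err
//    }
//    _ = resp.ETag // useful for later conditional writes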
-func (client *pageBlobClient) createHandleResponse(resp *http.Response) (pageBlobClientCreateResponse, error) { - result := pageBlobClientCreateResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return pageBlobClientCreateResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// GetPageRanges - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page -// blob -// If the operation fails it returns an *azcore.ResponseError type. -// pageBlobClientGetPageRangesOptions - pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) GetPageRanges(pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesPager { - return &pageBlobClientGetPageRangesPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.getPageRangesCreateRequest(ctx, pageBlobClientGetPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) - }, - advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) - }, - } -} - -// getPageRangesCreateRequest creates the GetPageRanges request. 
-func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "pagelist") - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Snapshot != nil { - reqQP.Set("snapshot", *pageBlobClientGetPageRangesOptions.Snapshot) - } - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Timeout), 10)) - } - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Marker != nil { - reqQP.Set("marker", *pageBlobClientGetPageRangesOptions.Marker) - } - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Maxresults), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// getPageRangesHandleResponse handles the GetPageRanges response. 
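// GetPageRanges returns a pager rather than a single response. The canonical
// consume loop (a sketch; PageList's PageRange field name is assumed from the
// generated models):
//
//    pager := client.GetPageRanges(nil, nil, nil)
//    for pager.NextPage(ctx) {
//        resp := pager.PageResponse()
//        for _, pr := range resp.PageList.PageRange {
//            _ = pr // one valid, 512-aligned page range
//        }
//    }
//    if err := pager.Err(); err != nil {
//        return err
//    }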
-func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesResponse, error) { - result := pageBlobClientGetPageRangesResponse{RawResponse: resp} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - result.Date = &date - } - if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { - return pageBlobClientGetPageRangesResponse{}, err - } - return result, nil -} - -// GetPageRangesDiff - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were -// changed between target blob and previous snapshot. -// If the operation fails it returns an *azcore.ResponseError type. -// pageBlobClientGetPageRangesDiffOptions - pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the -// pageBlobClient.GetPageRangesDiff method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) GetPageRangesDiff(pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesDiffPager { - return &pageBlobClientGetPageRangesDiffPager{ - client: client, - requester: func(ctx context.Context) (*policy.Request, error) { - return client.getPageRangesDiffCreateRequest(ctx, pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) - }, - advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { - return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) - }, - } -} - -// getPageRangesDiffCreateRequest creates the GetPageRangesDiff request. 
-func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context, pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "pagelist") - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Snapshot != nil { - reqQP.Set("snapshot", *pageBlobClientGetPageRangesDiffOptions.Snapshot) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Timeout), 10)) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Prevsnapshot != nil { - reqQP.Set("prevsnapshot", *pageBlobClientGetPageRangesDiffOptions.Prevsnapshot) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Marker != nil { - reqQP.Set("marker", *pageBlobClientGetPageRangesDiffOptions.Marker) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Maxresults), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL != nil { - req.Raw().Header.Set("x-ms-previous-snapshot-url", *pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL) - } - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesDiffOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesDiffOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// getPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
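// The diff variant needs a baseline: either Prevsnapshot (a snapshot time of
// the same blob) or PrevSnapshotURL. Sketch with an assumed snapshot value:
//
//    prev := "2022-01-01T00:00:00.0000000Z"
//    opts := &pageBlobClientGetPageRangesDiffOptions{Prevsnapshot: &prev}
//    pager := client.GetPageRangesDiff(opts, nil, nil)
//    for pager.NextPage(ctx) {
//        _ = pager.PageResponse().PageList // pages changed or cleared since prev
//    }
//    if err := pager.Err(); err != nil {
//        return err
//    }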
-func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesDiffResponse, error) { - result := pageBlobClientGetPageRangesDiffResponse{RawResponse: resp} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - result.Date = &date - } - if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { - return pageBlobClientGetPageRangesDiffResponse{}, err - } - return result, nil -} - -// Resize - Resize the Blob -// If the operation fails it returns an *azcore.ResponseError type. -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// pageBlobClientResizeOptions - pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) Resize(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientResizeResponse, error) { - req, err := client.resizeCreateRequest(ctx, blobContentLength, pageBlobClientResizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return pageBlobClientResizeResponse{}, runtime.NewResponseError(resp) - } - return client.resizeHandleResponse(resp) -} - -// resizeCreateRequest creates the Resize request. 
-func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientResizeOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientResizeOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// resizeHandleResponse handles the Resize response. 
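// Usage sketch for Resize: grow a blob to 8 MiB. The new length must stay
// 512-byte aligned; shrinking truncates pages beyond the new length:
//
//    resp, err := client.Resize(ctx, 8*1024*1024, nil, nil, nil, nil, nil)
//    if err != nil {
//        return err
//    }
//    _ = resp.BlobSequenceNumber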
-func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (pageBlobClientResizeResponse, error) { - result := pageBlobClientResizeResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientResizeResponse{}, err - } - result.Date = &date - } - return result, nil -} - -// UpdateSequenceNumber - Update the sequence number of the blob -// If the operation fails it returns an *azcore.ResponseError type. -// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to -// page blobs only. This property indicates how the service should modify the blob's sequence number -// pageBlobClientUpdateSequenceNumberOptions - pageBlobClientUpdateSequenceNumberOptions contains the optional parameters -// for the pageBlobClient.UpdateSequenceNumber method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUpdateSequenceNumberResponse, error) { - req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return pageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) - } - return client.updateSequenceNumberHandleResponse(resp) -} - -// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
-func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUpdateSequenceNumberOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) - if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber != nil { - req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber, 10)) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUpdateSequenceNumberOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
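// Usage sketch for UpdateSequenceNumber: pin the sequence number to an
// explicit value. SequenceNumberActionTypeUpdate is assumed from this
// package's generated constants:
//
//    n := int64(42)
//    opts := &pageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &n}
//    resp, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionTypeUpdate, opts, nil, nil)
//    if err != nil {
//        return err
//    }
//    _ = resp.BlobSequenceNumber // echoes the new value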
-func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (pageBlobClientUpdateSequenceNumberResponse, error) { - result := pageBlobClientUpdateSequenceNumberResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUpdateSequenceNumberResponse{}, err - } - result.Date = &date - } - return result, nil -} - -// UploadPages - The Upload Pages operation writes a range of pages to a page blob -// If the operation fails it returns an *azcore.ResponseError type. -// contentLength - The length of the request. -// body - Initial data -// pageBlobClientUploadPagesOptions - pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -func (client *pageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUploadPagesResponse, error) { - req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, pageBlobClientUploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) - } - return client.uploadPagesHandleResponse(resp) -} - -// uploadPagesCreateRequest creates the UploadPages request. 
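// Usage sketch for UploadPages: write the first 512-byte page. The body must
// be an io.ReadSeekCloser so the pipeline can rewind it on retry;
// streaming.NopCloser (from sdk/azcore/streaming) wraps a bytes.Reader. The
// target range goes in the Range option:
//
//    buf := make([]byte, 512)
//    r := "bytes=0-511"
//    opts := &pageBlobClientUploadPagesOptions{Range: &r}
//    body := streaming.NopCloser(bytes.NewReader(buf))
//    resp, err := client.UploadPages(ctx, int64(len(buf)), body, opts, nil, nil, nil, nil, nil)
//    if err != nil {
//        return err
//    }
//    _ = resp.ContentMD5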
-func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-page-write", "update") - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentMD5 != nil { - req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentMD5)) - } - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentCRC64 != nil { - req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentCRC64)) - } - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Range != nil { - req.Raw().Header.Set("x-ms-range", *pageBlobClientUploadPagesOptions.Range) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - 
} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesOptions.RequestID) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, req.SetBody(body, "application/octet-stream") -} - -// uploadPagesHandleResponse handles the UploadPages response. -func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (pageBlobClientUploadPagesResponse, error) { - result := pageBlobClientUploadPagesResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return pageBlobClientUploadPagesResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} - -// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from -// a URL -// If the operation fails it returns an *azcore.ResponseError type. -// sourceURL - Specify a URL to the copy source. -// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header -// and x-ms-range/Range destination range header. 
-// contentLength - The length of the request. -// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end -// is required. -// pageBlobClientUploadPagesFromURLOptions - pageBlobClientUploadPagesFromURLOptions contains the optional parameters for -// the pageBlobClient.UploadPagesFromURL method. -// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL -// method. -func (client *pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (pageBlobClientUploadPagesFromURLResponse, error) { - req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, pageBlobClientUploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - resp, err := client.pl.Do(req) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return pageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) - } - return client.uploadPagesFromURLHandleResponse(resp) -} - -// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
-func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesFromURLOptions.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header.Set("x-ms-page-write", "update") - req.Raw().Header.Set("x-ms-copy-source", sourceURL) - req.Raw().Header.Set("x-ms-source-range", sourceRange) - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentMD5 != nil { - req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentMD5)) - } - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64 != nil { - req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64)) - } - req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Raw().Header.Set("x-ms-range", rangeParam) - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) - } - req.Raw().Header.Set("x-ms-version", "2020-10-02") - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.RequestID != nil { - req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesFromURLOptions.RequestID) - } - if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization != nil { - req.Raw().Header.Set("x-ms-copy-source-authorization", *pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization) - } - req.Raw().Header.Set("Accept", "application/xml") - return req, nil -} - -// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
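// Usage sketch for UploadPagesFromURL: server-side copy of one 512-byte page.
// Source and destination ranges must be 512-aligned and equal in length;
// contentLength is 0 because no request body is sent. URLs are placeholders:
//
//    srcURL := "https://myaccount.blob.core.windows.net/mycontainer/source-blob"
//    resp, err := client.UploadPagesFromURL(ctx, srcURL, "bytes=0-511", 0, "bytes=0-511", nil, nil, nil, nil, nil, nil, nil)
//    if err != nil {
//        return err
//    }
//    _ = resp.XMSContentCRC64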
-func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (pageBlobClientUploadPagesFromURLResponse, error) { - result := pageBlobClientUploadPagesFromURLResponse{RawResponse: resp} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return pageBlobClientUploadPagesFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go deleted file mode 100644 index 9f0cc4629..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go +++ /dev/null @@ -1,287 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package azblob - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "net/http" - "reflect" -) - -// containerClientListBlobFlatSegmentPager provides operations for iterating over paged responses. -type containerClientListBlobFlatSegmentPager struct { - client *containerClient - current containerClientListBlobFlatSegmentResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, containerClientListBlobFlatSegmentResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. 
-func (p *containerClientListBlobFlatSegmentPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *containerClientListBlobFlatSegmentPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.ListBlobsFlatSegmentResponse.NextMarker == nil || len(*p.current.ListBlobsFlatSegmentResponse.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.listBlobFlatSegmentHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current containerClientListBlobFlatSegmentResponse page. -func (p *containerClientListBlobFlatSegmentPager) PageResponse() containerClientListBlobFlatSegmentResponse { - return p.current -} - -// containerClientListBlobHierarchySegmentPager provides operations for iterating over paged responses. -type containerClientListBlobHierarchySegmentPager struct { - client *containerClient - current containerClientListBlobHierarchySegmentResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. -func (p *containerClientListBlobHierarchySegmentPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *containerClientListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.ListBlobsHierarchySegmentResponse.NextMarker == nil || len(*p.current.ListBlobsHierarchySegmentResponse.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.listBlobHierarchySegmentHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current containerClientListBlobHierarchySegmentResponse page. -func (p *containerClientListBlobHierarchySegmentPager) PageResponse() containerClientListBlobHierarchySegmentResponse { - return p.current -} - -// pageBlobClientGetPageRangesDiffPager provides operations for iterating over paged responses. -type pageBlobClientGetPageRangesDiffPager struct { - client *pageBlobClient - current pageBlobClientGetPageRangesDiffResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. 
-func (p *pageBlobClientGetPageRangesDiffPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *pageBlobClientGetPageRangesDiffPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.getPageRangesDiffHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current pageBlobClientGetPageRangesDiffResponse page. -func (p *pageBlobClientGetPageRangesDiffPager) PageResponse() pageBlobClientGetPageRangesDiffResponse { - return p.current -} - -// pageBlobClientGetPageRangesPager provides operations for iterating over paged responses. -type pageBlobClientGetPageRangesPager struct { - client *pageBlobClient - current pageBlobClientGetPageRangesResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, pageBlobClientGetPageRangesResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. -func (p *pageBlobClientGetPageRangesPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. -func (p *pageBlobClientGetPageRangesPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.getPageRangesHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current pageBlobClientGetPageRangesResponse page. -func (p *pageBlobClientGetPageRangesPager) PageResponse() pageBlobClientGetPageRangesResponse { - return p.current -} - -// serviceClientListContainersSegmentPager provides operations for iterating over paged responses. -type serviceClientListContainersSegmentPager struct { - client *serviceClient - current serviceClientListContainersSegmentResponse - err error - requester func(context.Context) (*policy.Request, error) - advancer func(context.Context, serviceClientListContainersSegmentResponse) (*policy.Request, error) -} - -// Err returns the last error encountered while paging. -func (p *serviceClientListContainersSegmentPager) Err() error { - return p.err -} - -// NextPage returns true if the pager advanced to the next page. -// Returns false if there are no more pages or an error occurred. 
-func (p *serviceClientListContainersSegmentPager) NextPage(ctx context.Context) bool { - var req *policy.Request - var err error - if !reflect.ValueOf(p.current).IsZero() { - if p.current.ListContainersSegmentResponse.NextMarker == nil || len(*p.current.ListContainersSegmentResponse.NextMarker) == 0 { - return false - } - req, err = p.advancer(ctx, p.current) - } else { - req, err = p.requester(ctx) - } - if err != nil { - p.err = err - return false - } - resp, err := p.client.pl.Do(req) - if err != nil { - p.err = err - return false - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - p.err = runtime.NewResponseError(resp) - return false - } - result, err := p.client.listContainersSegmentHandleResponse(resp) - if err != nil { - p.err = err - return false - } - p.current = result - return true -} - -// PageResponse returns the current serviceClientListContainersSegmentResponse page. -func (p *serviceClientListContainersSegmentPager) PageResponse() serviceClientListContainersSegmentResponse { - return p.current -} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 1c2c6ae24..11a33de73 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -12,7 +12,9 @@ package confidential import ( "context" "crypto" + "crypto/rsa" "crypto/x509" + "encoding/base64" "encoding/pem" "errors" "fmt" @@ -20,6 +22,7 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" @@ -60,12 +63,9 @@ type AuthResult = base.AuthResult type Account = shared.Account // CertFromPEM converts a PEM file (.pem or .key) for use with NewCredFromCert(). The file -// must have the public certificate and the private key encoded. The private key must be encoded -// in PKCS8 (not PKCS1). This is usually denoted by the section "PRIVATE KEY" (instead of PKCS1's -// "RSA PRIVATE KEY"). If a PEM block is encoded and password is not an empty string, it attempts -// to decrypt the PEM blocks using the password. This will return multiple x509 certificates, -// though this use case should have a single cert. Multiple certs are due to certificate -// chaining for use cases like TLS that sign from root to leaf. +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. 
func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { var certs []*x509.Certificate var priv crypto.PrivateKey @@ -79,7 +79,7 @@ func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.P if x509.IsEncryptedPEMBlock(block) { b, err := x509.DecryptPEMBlock(block, []byte(password)) if err != nil { - return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %w", err) + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) } block, _ = pem.Decode(b) if block == nil { @@ -91,18 +91,27 @@ func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.P case "CERTIFICATE": cert, err := x509.ParseCertificate(block.Bytes) if err != nil { - return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be pared by x509: %w", err) + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) } certs = append(certs, cert) case "PRIVATE KEY": if priv != nil { - return nil, nil, fmt.Errorf("found multiple blocks labelled 'PRIVATE KEY'") + return nil, nil, errors.New("found multiple private key blocks") } var err error - priv, err = parsePrivateKey(block.Bytes) + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) if err != nil { - return nil, nil, fmt.Errorf("could not decode private key: %w", err) + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) } } pemData = rest @@ -119,15 +128,8 @@ func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.P return certs, priv, nil } -// parsePrivateKey is based on https://gist.github.com/ukautz/cd118e298bbd8f0a88fc . I don't *think* -// we need to do the extra decoding in the example, but *maybe*? -func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { - key, err := x509.ParsePKCS8PrivateKey(der) - if err != nil { - return nil, fmt.Errorf("problems decoding private key using PKCS8: %w", err) - } - return key, nil -} +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions // Credential represents the credential used in confidential client flows. type Credential struct { @@ -135,16 +137,37 @@ type Credential struct { cert *x509.Certificate key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) - assertion string + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) } // toInternal returns the accesstokens.Credential that is used internally. The current structure of the // code requires that client.go, requests.go and confidential.go share a credential type without // having import recursion. That requires the type used between is in a shared package. Therefore // we have this. 
-func (c Credential) toInternal() *accesstokens.Credential { - return &accesstokens.Credential{Secret: c.secret, Cert: c.cert, Key: c.key, Assertion: c.assertion} +func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") } // NewCredFromSecret creates a Credential from a secret. @@ -156,17 +179,69 @@ func NewCredFromSecret(secret string) (Credential, error) { } // NewCredFromAssertion creates a Credential from a signed assertion. +// +// Deprecated: a Credential created by this function can't refresh the +// assertion when it expires. Use NewCredFromAssertionCallback instead. func NewCredFromAssertion(assertion string) (Credential, error) { if assertion == "" { return Credential{}, errors.New("assertion can't be empty string") } - return Credential{assertion: assertion}, nil + return NewCredFromAssertionCallback(func(context.Context, AssertionRequestOptions) (string, error) { return assertion, nil }), nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} } -// NewCredFromCert creates a Credential from an x509.Certificate and a PKCS8 DER encoded private key. -// CertFromPEM() can be used to get these values from a PEM file storing a PKCS8 private key. +// NewCredFromCert creates a Credential from an x509.Certificate and an RSA private key. +// CertFromPEM() can be used to get these values from a PEM file. func NewCredFromCert(cert *x509.Certificate, key crypto.PrivateKey) Credential { - return Credential{cert: cert, key: key} + cred, _ := NewCredFromCertChain([]*x509.Certificate{cert}, key) + return cred +} + +// NewCredFromCertChain creates a Credential from a chain of x509.Certificates and an RSA private key +// as returned by CertFromPEM(). +func NewCredFromCertChain(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) 
+ } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} } // AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. @@ -274,6 +349,11 @@ func WithAzureRegion(val string) Option { // will store credentials for (a Client is per user). clientID is the Azure clientID and cred is // the type of credential to use. func New(clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + opts := Options{ Authority: base.AuthorityPublicCloud, HTTPClient: shared.DefaultClient, @@ -286,15 +366,28 @@ func New(clientID string, cred Credential, options ...Option) (Client, error) { return Client{}, err } - base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithX5C(opts.SendX5C), base.WithCacheAccessor(opts.Accessor), base.WithRegionDetection(opts.AzureRegion)) + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.Accessor), + base.WithRegionDetection(opts.AzureRegion), + base.WithX5C(opts.SendX5C), + } + if cred.tokenProvider != nil { + // The caller will handle all details of authentication, using Client only as a token cache. + // Declaring the authority host known prevents unnecessary metadata discovery requests. (The + // authority is irrelevant to Client and friends because the token provider is responsible + // for authentication.) + parsed, err := url.Parse(opts.Authority) + if err != nil { + return Client{}, errors.New("invalid authority") + } + baseOpts = append(baseOpts, base.WithKnownAuthorityHosts([]string{parsed.Hostname()})) + } + base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), baseOpts...) if err != nil { return Client{}, err } - return Client{ - base: base, - cred: cred.toInternal(), - }, nil + return Client{base: base, cred: internalCred}, nil } // UserID is the unique user identifier this client if for. 
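Note for reviewers: the confidential.go hunks above move from a single cached assertion to explicit credential kinds: secret, certificate chain, assertion callback, and token provider. A minimal sketch of the new constructors follows; it only uses symbols introduced in this diff, and the client ID and PEM path are placeholders, not values from this patch:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	// CertFromPEM now parses PKCS1 ("RSA PRIVATE KEY") as well as PKCS8 blocks.
	pemData, err := os.ReadFile("cert.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	certs, key, err := confidential.CertFromPEM(pemData, "")
	if err != nil {
		log.Fatal(err)
	}

	// NewCredFromCertChain matches the signing certificate to the private key
	// and orders that certificate first in the assertion's x5c header.
	cred, err := confidential.NewCredFromCertChain(certs, key)
	if err != nil {
		log.Fatal(err)
	}

	// Assertions are now fetched through a callback whenever one is needed,
	// rather than cached until expiry (NewCredFromAssertion is deprecated).
	_ = confidential.NewCredFromAssertionCallback(
		func(ctx context.Context, opts confidential.AssertionRequestOptions) (string, error) {
			// opts.ClientID feeds the iss/sub claims, opts.TokenEndpoint the aud claim.
			return "", fmt.Errorf("sign an assertion for %s here", opts.ClientID)
		})

	client, err := confidential.New("client-id", cred) // placeholder client ID
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}

As the hunk shows, NewCredFromCert now delegates to NewCredFromCertChain with a single-element chain, so both paths share the same key/certificate matching logic.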
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go index c8adf3da2..c9b8dbed0 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "reflect" "strings" @@ -21,7 +20,7 @@ var prettyConf = &pretty.Config{ TrackCycles: true, Formatter: map[reflect.Type]interface{}{ reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return "could not read io.Reader content" } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index ac2916433..a86f06400 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -37,9 +37,9 @@ type manager interface { } // partitionedManager provides an internal cache. It is defined to allow faking the cache in tests. -// In all production use it is a *storage.Manager. +// In all production use it is a *storage.PartitionedManager. type partitionedManager interface { - Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (storage.TokenResponse, error) + Read(ctx context.Context, authParameters authority.AuthParams) (storage.TokenResponse, error) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) } @@ -147,6 +147,15 @@ func WithCacheAccessor(ca cache.ExportReplace) Option { } } +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + } +} + // WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. func WithX5C(sendX5C bool) Option { return func(c *Client) { @@ -230,7 +239,7 @@ func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, s func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and authParams is not a pointer. 
authParams.Scopes = silent.Scopes - authParams.HomeaccountID = silent.Account.HomeAccountID + authParams.HomeAccountID = silent.Account.HomeAccountID authParams.AuthorizationType = silent.AuthorizationType authParams.UserAssertion = silent.UserAssertion @@ -242,7 +251,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen b.cacheAccessor.Replace(s, suggestedCacheKey) defer b.cacheAccessor.Export(s, suggestedCacheKey) } - storageTokenResponse, err = b.pmanager.Read(ctx, authParams, silent.Account) + storageTokenResponse, err = b.pmanager.Read(ctx, authParams) if err != nil { return AuthResult{}, err } @@ -262,7 +271,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen result, err := AuthResultFromStorage(storageTokenResponse) if err != nil { if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { - return AuthResult{}, errors.New("no refresh token found") + return AuthResult{}, errors.New("no token found") } var cc *accesstokens.Credential @@ -270,7 +279,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen cc = silent.Credential } - token, err := b.Token.Refresh(ctx, silent.RequestType, b.AuthParams, cc, storageTokenResponse.RefreshToken) + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) if err != nil { return AuthResult{}, err } @@ -376,7 +385,7 @@ func (b Client) AllAccounts() []shared.Account { func (b Client) Account(homeAccountID string) shared.Account { authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. authParams.AuthorizationType = authority.AccountByID - authParams.HomeaccountID = homeAccountID + authParams.HomeAccountID = homeAccountID if s, ok := b.manager.(cache.Serializer); ok { suggestedCacheKey := b.AuthParams.CacheKey(false) b.cacheAccessor.Replace(s, suggestedCacheKey) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go index 1e8d9a841..548c2faeb 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -103,8 +103,14 @@ func (a AccessToken) Key() string { ) } +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + // Validate validates that this AccessToken can be used. 
func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } if a.CachedAt.T.After(time.Now()) { return errors.New("access token isn't valid, it was cached at a future time") } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go index 4c7c1f1b5..d17e7c034 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -36,7 +36,7 @@ func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { } // Read reads a storage token from the cache if it exists. -func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { +func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes @@ -69,7 +69,7 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. return TokenResponse{}, err } - account, err = m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) + account, err := m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) if err != nil { return TokenResponse{}, err } @@ -83,8 +83,8 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. // Write writes a token response to the cache and returns the account information the token is stored with. func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { - authParameters.HomeaccountID = tokenResponse.ClientInfo.HomeAccountID() - homeAccountID := authParameters.HomeaccountID + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index 881bd7c68..b759408b5 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -84,20 +84,22 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool { // Read reads a storage token from the cache if it exists. 
func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { - homeAccountID := authParameters.HomeaccountID + homeAccountID := authParameters.HomeAccountID realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes - metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) - if err != nil { - return TokenResponse{}, err + // fetch metadata if and only if the authority isn't explicitly trusted + aliases := authParameters.KnownAuthorityHosts + if len(aliases) == 0 { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases } - accessToken, err := m.readAccessToken(homeAccountID, metadata.Aliases, realm, clientID, scopes) - if err != nil { - return TokenResponse{}, err - } + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) if account.IsZero() { return TokenResponse{ @@ -107,22 +109,22 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, Account: shared.Account{}, }, nil } - idToken, err := m.readIDToken(homeAccountID, metadata.Aliases, realm, clientID) + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) if err != nil { return TokenResponse{}, err } - AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID) + AppMetaData, err := m.readAppMetaData(aliases, clientID) if err != nil { return TokenResponse{}, err } familyID := AppMetaData.FamilyID - refreshToken, err := m.readRefreshToken(homeAccountID, metadata.Aliases, familyID, clientID) + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) if err != nil { return TokenResponse{}, err } - account, err = m.readAccount(homeAccountID, metadata.Aliases, realm) + account, err = m.readAccount(homeAccountID, aliases, realm) if err != nil { return TokenResponse{}, err } @@ -138,8 +140,8 @@ const scopeSeparator = " " // Write writes a token response to the cache and returns the account information the token is stored with. func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { - authParameters.HomeaccountID = tokenResponse.ClientInfo.HomeAccountID() - homeAccountID := authParameters.HomeaccountID + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID @@ -249,7 +251,7 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) return m.aadCache[authorityInfo.Host], nil } -func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) (AccessToken, error) { +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { m.contractMu.RLock() defer m.contractMu.RUnlock() // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. 
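A note on the storage change above: readAccessToken now reports a cache miss as the zero AccessToken instead of an error, and callers (e.g. AcquireTokenSilent earlier in this patch) detect the miss reflectively. A self-contained sketch of that zero-value-as-miss pattern, where the token type and cache are hypothetical stand-ins:

package main

import (
	"fmt"
	"reflect"
)

// token is a hypothetical stand-in for the cached AccessToken shape.
type token struct {
	Secret string
	Realm  string
}

// lookup reports a miss as the zero value rather than returning an error.
func lookup(cache map[string]token, key string) token {
	return cache[key] // map access yields the zero value for absent keys
}

func main() {
	cache := map[string]token{"hit": {Secret: "s", Realm: "contoso"}}
	for _, key := range []string{"hit", "miss"} {
		tok := lookup(cache, key)
		// reflect.ValueOf(v).IsZero() is the miss check the callers above use.
		fmt.Printf("%s -> zero value: %v\n", key, reflect.ValueOf(tok).IsZero())
	}
}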
@@ -259,12 +261,12 @@ func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, cli if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { if checkAlias(at.Environment, envAliases) { if isMatchingScopes(scopes, at.Scopes) { - return at, nil + return at } } } } - return AccessToken{}, fmt.Errorf("access token not found") + return AccessToken{} } func (m *Manager) writeAccessToken(accessToken AccessToken) error { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 000000000..7b673e3fe --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. + TokenEndpoint string +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters struct { + // Claims contains any additional claims requested for the token + Claims string + // CorrelationID of the authentication request + CorrelationID string + // Scopes requested for the token + Scopes []string + // TenantID identifies the tenant in which to authenticate + TenantID string +} + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult struct { + // AccessToken is the requested token + AccessToken string + // ExpiresInSeconds is the lifetime of the token in seconds + ExpiresInSeconds int +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 83bd60d41..2238521f5 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -52,7 +52,7 @@ func Marshal(i interface{}) ([]byte, error) { if v.Kind() != reflect.Ptr && v.CanAddr() { v = v.Addr() } - err := marshalStruct(reflect.ValueOf(i), &buff, enc) + err := marshalStruct(v, &buff, enc) if err != nil { return nil, err } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index 41f4373fa..04236ff31 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -11,6 +11,7 @@ import ( "net/http" "strconv" "strings" + "time" ) var okPage = []byte(` @@ -84,7 +85,7 @@ func New(reqState string, port int) (*Server, error) { serv := &Server{ Addr: fmt.Sprintf("http://localhost:%s", portStr), - s: &http.Server{Addr: "localhost:0"}, + s: &http.Server{Addr: 
"localhost:0", ReadHeaderTimeout: time.Second}, reqState: reqState, resultCh: make(chan Result, 1), } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index 825f55fb7..6b4016c11 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -7,15 +7,18 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" ) // ResolveEndpointer contains the methods for resolving authority endpoints. @@ -92,6 +95,27 @@ func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) // Credential acquires a token from the authority using a client credentials grant. func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + CorrelationID: uuid.New().String(), + Scopes: scopes, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { return accesstokens.TokenResponse{}, err } @@ -99,7 +123,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams if cred.Secret != "" { return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) } - jwt, err := cred.JWT(authParams) + jwt, err := cred.JWT(ctx, authParams) if err != nil { return accesstokens.TokenResponse{}, err } @@ -116,7 +140,7 @@ func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) } - jwt, err := cred.JWT(authParams) + jwt, err := cred.JWT(ctx, authParams) if err != nil { return accesstokens.TokenResponse{}, err } @@ -233,7 +257,7 @@ func isWaitDeviceCodeErr(err error) bool { } var dCErr deviceCodeError defer c.Resp.Body.Close() - body, err := ioutil.ReadAll(c.Resp.Body) + body, err := io.ReadAll(c.Resp.Body) if err != nil { return false } diff --git 
a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index 65831b0b7..eaeb2ef5f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -19,17 +19,18 @@ import ( "crypto/sha1" "crypto/x509" "encoding/base64" + "encoding/json" "fmt" "net/url" "strconv" "strings" - "sync" "time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" - "github.com/golang-jwt/jwt" + "github.com/golang-jwt/jwt/v4" "github.com/google/uuid" ) @@ -43,9 +44,6 @@ const ( password = "password" ) -// assertionLifetime allows tests to control the expiration time of JWT assertions created by Credential. -var assertionLifetime = 10 * time.Minute - //go:generate stringer -type=AppType // AppType is whether the authorization code flow is for a public or confidential client. @@ -90,44 +88,37 @@ type Credential struct { // Secret contains the credential secret if we are doing auth by secret. Secret string - // Cert is the public x509 certificate if we are doing any auth other than secret. + // Cert is the public certificate, if we're authenticating by certificate. Cert *x509.Certificate - // Key is the private key for signing if we are doing any auth other than secret. + // Key is the private key for signing, if we're authenticating by certificate. Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) - // mu protects everything below. - mu sync.Mutex - // Assertion is the signed JWT assertion if we have retrieved it or if it was passed. - Assertion string - // Expires is when the Assertion expires. Public to allow faking in tests. - // Any use outside msal is not supported by a compatibility promise. - Expires time.Time + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) } // JWT gets the jwt assertion when the credential is not using a secret. -func (c *Credential) JWT(authParams authority.AuthParams) (string, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.Expires.After(time.Now()) { - return c.Assertion, nil - } else if c.Cert == nil || c.Key == nil { - // The assertion has expired and this Credential can't generate a new one. The assertion - // was presumably provided by the application via confidential.NewCredFromAssertion(). 
We - // return it despite its expiration to maintain the behavior of previous versions, and - // because there's no API enabling the application to replace the assertion - // (see https://github.com/AzureAD/microsoft-authentication-library-for-go/issues/292). - return c.Assertion, nil +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) } - expires := time.Now().Add(assertionLifetime) - token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ "aud": authParams.Endpoints.TokenEndpoint, - "exp": strconv.FormatInt(expires.Unix(), 10), + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), "iss": authParams.ClientID, "jti": uuid.New().String(), - "nbf": strconv.FormatInt(time.Now().Unix(), 10), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), "sub": authParams.ClientID, }) token.Header = map[string]interface{}{ @@ -137,16 +128,14 @@ func (c *Credential) JWT(authParams authority.AuthParams) (string, error) { } if authParams.SendX5C { - token.Header["x5c"] = []string{base64.StdEncoding.EncodeToString(c.Cert.Raw)} + token.Header["x5c"] = c.X5c } - var err error - c.Assertion, err = token.SignedString(c.Key) + + assertion, err := token.SignedString(c.Key) if err != nil { return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) } - - c.Expires = expires - return c.Assertion, nil + return assertion, nil } // thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. @@ -213,7 +202,7 @@ func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenRes if req.Credential == nil { return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") } - qv, err = prepURLVals(req.Credential, req.AuthParams) + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) if err != nil { return TokenResponse{}, err } @@ -239,7 +228,7 @@ func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParam qv := url.Values{} if appType == ATConfidential { var err error - qv, err = prepURLVals(cc, authParams) + qv, err = prepURLVals(ctx, cc, authParams) if err != nil { return TokenResponse{}, err } @@ -374,14 +363,14 @@ func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams // prepURLVals returns an url.Values that sets various key/values if we are doing secrets // or JWT assertions. 
-func prepURLVals(cc *Credential, authParams authority.AuthParams) (url.Values, error) { +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { params := url.Values{} if cc.Secret != "" { params.Set("client_secret", cc.Secret) return params, nil } - jwt, err := cc.JWT(authParams) + jwt, err := cc.JWT(ctx, authParams) if err != nil { return nil, err } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index dcc6c53c2..4724d944f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -9,7 +9,7 @@ import ( "encoding/base64" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -22,7 +22,7 @@ import ( const ( authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" - TenantDiscoveryEndpointWithRegion = "https://%v.r.%v/%v/v2.0/.well-known/openid-configuration" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" regionName = "REGION_NAME" defaultAPIVersion = "2021-10-01" imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion @@ -133,7 +133,7 @@ type AuthParams struct { ClientID string // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). Redirecturi string - HomeaccountID string + HomeAccountID string // Username is the user-name portion for username/password auth flow. Username string // Password is the password portion for username/password auth flow. @@ -156,6 +156,9 @@ type AuthParams struct { SendX5C bool // UserAssertion is the access token used to acquire token on behalf of user UserAssertion string + + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string } // NewAuthParams creates an authorization parameters object. @@ -243,7 +246,7 @@ const ( Managed UserRealmAccountType = "Managed" ) -//UserRealm is used for the username password request to determine user type +// UserRealm is used for the username password request to determine user type type UserRealm struct { AccountType UserRealmAccountType `json:"account_type"` DomainName string `json:"domain_name"` @@ -332,7 +335,12 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I region = detectRegion(ctx) } if region != "" { - resp.TenantDiscoveryEndpoint = fmt.Sprintf(TenantDiscoveryEndpointWithRegion, region, authorityInfo.Host, authorityInfo.Tenant) + environment := authorityInfo.Host + switch environment { + case "login.microsoft.com", "login.windows.net", "sts.windows.net", defaultHost: + environment = "r." 
+ defaultHost + } + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) metadata := InstanceDiscoveryMetadata{ PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), PreferredCache: authorityInfo.Host, @@ -378,7 +386,7 @@ func detectRegion(ctx context.Context) string { } } defer resp.Body.Close() - response, err := ioutil.ReadAll(resp.Body) + response, err := io.ReadAll(resp.Body) if err != nil { return "" } @@ -393,7 +401,7 @@ func (a *AuthParams) CacheKey(isAppCache bool) string { return a.AppKey() } if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { - return a.HomeaccountID + return a.HomeAccountID } return "" } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go index 0620b3e41..7d9ec7cd3 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -11,7 +11,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "net/url" "reflect" @@ -89,7 +88,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea if err != nil { return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) } - req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + req.Body = io.NopCloser(bytes.NewBuffer(data)) req.Method = http.MethodPost } @@ -163,7 +162,7 @@ func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, b if len(body) > 0 { req.Method = http.MethodPost - req.Body = ioutil.NopCloser(strings.NewReader(body)) + req.Body = io.NopCloser(strings.NewReader(body)) } data, err := c.do(ctx, req) @@ -201,9 +200,9 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values URL: u, Header: headers, ContentLength: int64(len(enc)), - Body: ioutil.NopCloser(strings.NewReader(enc)), + Body: io.NopCloser(strings.NewReader(enc)), GetBody: func() (io.ReadCloser, error) { - return ioutil.NopCloser(strings.NewReader(enc)), nil + return io.NopCloser(strings.NewReader(enc)), nil }, } @@ -248,7 +247,7 @@ func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { if err != nil { return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) } - reply.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + reply.Body = io.NopCloser(bytes.NewBuffer(data)) // NOTE: This doesn't happen immediately after the call so that we can get an error message // from the server and include it in our error. 
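The comm.go hunks above are part of the mechanical migration off the deprecated io/ioutil package (Go 1.16+). A minimal sketch of the replacement calls, independent of this library:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	// io.NopCloser replaces ioutil.NopCloser; io.ReadAll replaces ioutil.ReadAll.
	body := io.NopCloser(bytes.NewBufferString("payload"))
	defer body.Close()

	data, err := io.ReadAll(body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data)
}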
@@ -297,7 +296,7 @@ func (c *Client) readBody(resp *http.Response) ([]byte, error) { default: return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) } - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } var testID string diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go index 01060ac62..1f9c543fa 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -6,6 +6,7 @@ Package ops provides operations to various backend services using REST clients. The REST type provides several clients that can be used to communicate to backends. Usage is simple: + rest := ops.New() // Creates an authority client and calls the UserRealm() method. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index e50d5e97f..5e1ea9129 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. -const Version = "0.5.1" +const Version = "0.7.0" diff --git a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go index d8221fbf0..5b2eeb43c 100644 --- a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go +++ b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go @@ -6,7 +6,6 @@ package prometheus import ( "fmt" "log" - "regexp" "strings" "sync" "time" @@ -248,15 +247,15 @@ func initCounters(m *sync.Map, counters []CounterDefinition, help map[string]str return } -var forbiddenChars = regexp.MustCompile("[ .=\\-/]") +var forbiddenCharsReplacer = strings.NewReplacer(" ", "_", ".", "_", "=", "_", "-", "_", "/", "_") func flattenKey(parts []string, labels []metrics.Label) (string, string) { key := strings.Join(parts, "_") - key = forbiddenChars.ReplaceAllString(key, "_") + key = forbiddenCharsReplacer.Replace(key) hash := key for _, label := range labels { - hash += fmt.Sprintf(";%s=%s", label.Name, label.Value) + hash += ";" + label.Name + "=" + label.Value } return key, hash diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index e62483600..18694f07f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) return credentials.NewCredentials(p) } -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. +type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. 
This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. SecretAccessKey string - SessionToken string - Expiration *time.Time + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time } // Retrieve executes the 'credential_process' and returns the credentials. @@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) { } // Serialize and validate response - resp := &credentialProcessResponse{} + resp := &CredentialProcessResponse{} if err = json.Unmarshal(out, resp); err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New( ErrCodeProcessProviderParse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go new file mode 100644 index 000000000..c07f6731e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go @@ -0,0 +1,122 @@ +package crr + +import ( + "sync/atomic" +) + +// EndpointCache is an LRU cache that holds a series of endpoints +// based on some key. The datastructure makes use of a read write +// mutex to enable asynchronous use. +type EndpointCache struct { + endpoints syncMap + endpointLimit int64 + // size is used to count the number elements in the cache. + // The atomic package is used to ensure this size is accurate when + // using multiple goroutines. + size int64 +} + +// NewEndpointCache will return a newly initialized cache with a limit +// of endpointLimit entries. +func NewEndpointCache(endpointLimit int64) *EndpointCache { + return &EndpointCache{ + endpointLimit: endpointLimit, + endpoints: newSyncMap(), + } +} + +// get is a concurrent safe get operation that will retrieve an endpoint +// based on endpointKey. A boolean will also be returned to illustrate whether +// or not the endpoint had been found. +func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) { + endpoint, ok := c.endpoints.Load(endpointKey) + if !ok { + return Endpoint{}, false + } + + ev := endpoint.(Endpoint) + ev.Prune() + + c.endpoints.Store(endpointKey, ev) + return endpoint.(Endpoint), true +} + +// Has returns if the enpoint cache contains a valid entry for the endpoint key +// provided. +func (c *EndpointCache) Has(endpointKey string) bool { + endpoint, ok := c.get(endpointKey) + _, found := endpoint.GetValidAddress() + + return ok && found +} + +// Get will retrieve a weighted address based off of the endpoint key. If an endpoint +// should be retrieved, due to not existing or the current endpoint has expired +// the Discoverer object that was passed in will attempt to discover a new endpoint +// and add that to the cache. +func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) { + var err error + endpoint, ok := c.get(endpointKey) + weighted, found := endpoint.GetValidAddress() + shouldGet := !ok || !found + + if required && shouldGet { + if endpoint, err = c.discover(d, endpointKey); err != nil { + return WeightedAddress{}, err + } + + weighted, _ = endpoint.GetValidAddress() + } else if shouldGet { + go c.discover(d, endpointKey) + } + + return weighted, nil +} + +// Add is a concurrent safe operation that will allow new endpoints to be added +// to the cache. 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
new file mode 100644
index 000000000..c07f6731e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
@@ -0,0 +1,122 @@
+package crr
+
+import (
+	"sync/atomic"
+)
+
+// EndpointCache is a bounded cache that holds a series of endpoints
+// based on some key. The data structure makes use of a read write
+// mutex to enable asynchronous use.
+type EndpointCache struct {
+	endpoints     syncMap
+	endpointLimit int64
+	// size is used to count the number of elements in the cache.
+	// The atomic package is used to ensure this size is accurate when
+	// using multiple goroutines.
+	size int64
+}
+
+// NewEndpointCache will return a newly initialized cache with a limit
+// of endpointLimit entries.
+func NewEndpointCache(endpointLimit int64) *EndpointCache {
+	return &EndpointCache{
+		endpointLimit: endpointLimit,
+		endpoints:     newSyncMap(),
+	}
+}
+
+// get is a concurrent safe get operation that will retrieve an endpoint
+// based on endpointKey. A boolean will also be returned to illustrate whether
+// or not the endpoint had been found.
+func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) {
+	endpoint, ok := c.endpoints.Load(endpointKey)
+	if !ok {
+		return Endpoint{}, false
+	}
+
+	ev := endpoint.(Endpoint)
+	ev.Prune()
+
+	c.endpoints.Store(endpointKey, ev)
+	return endpoint.(Endpoint), true
+}
+
+// Has returns whether the endpoint cache contains a valid entry for the
+// endpoint key provided.
+func (c *EndpointCache) Has(endpointKey string) bool {
+	endpoint, ok := c.get(endpointKey)
+	_, found := endpoint.GetValidAddress()
+
+	return ok && found
+}
+
+// Get will retrieve a weighted address based off of the endpoint key. If an
+// endpoint needs to be retrieved, because it does not exist or the current
+// endpoint has expired, the Discoverer object that was passed in will attempt
+// to discover a new endpoint and add that to the cache.
+func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) {
+	var err error
+	endpoint, ok := c.get(endpointKey)
+	weighted, found := endpoint.GetValidAddress()
+	shouldGet := !ok || !found
+
+	if required && shouldGet {
+		if endpoint, err = c.discover(d, endpointKey); err != nil {
+			return WeightedAddress{}, err
+		}
+
+		weighted, _ = endpoint.GetValidAddress()
+	} else if shouldGet {
+		go c.discover(d, endpointKey)
+	}
+
+	return weighted, nil
+}
+
+// Add is a concurrent safe operation that will allow new endpoints to be added
+// to the cache. If the cache is full, meaning the number of endpoints equals
+// endpointLimit, an existing entry is evicted before the new endpoint is added.
+func (c *EndpointCache) Add(endpoint Endpoint) {
+	// de-dups multiple adds of an endpoint with a pre-existing key
+	if iface, ok := c.endpoints.Load(endpoint.Key); ok {
+		e := iface.(Endpoint)
+		if e.Len() > 0 {
+			return
+		}
+	}
+	c.endpoints.Store(endpoint.Key, endpoint)
+
+	size := atomic.AddInt64(&c.size, 1)
+	if size > 0 && size > c.endpointLimit {
+		c.deleteRandomKey()
+	}
+}
+
+// deleteRandomKey will delete a random key from the cache. If
+// no key was deleted, false will be returned.
+func (c *EndpointCache) deleteRandomKey() bool {
+	atomic.AddInt64(&c.size, -1)
+	found := false
+
+	c.endpoints.Range(func(key, value interface{}) bool {
+		found = true
+		c.endpoints.Delete(key)
+
+		return false
+	})
+
+	return found
+}
+
+// discover will get and store an endpoint using the Discoverer.
+func (c *EndpointCache) discover(d Discoverer, endpointKey string) (Endpoint, error) {
+	endpoint, err := d.Discover()
+	if err != nil {
+		return Endpoint{}, err
+	}
+
+	endpoint.Key = endpointKey
+	c.Add(endpoint)
+
+	return endpoint, nil
+}
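EndpointCache above, together with the Endpoint, WeightedAddress, and Discoverer types introduced in endpoint.go below, is the machinery behind the SDK's endpoint-discovery feature. A hedged usage sketch with a made-up Discoverer and hostname; nothing here is part of the vendored code:

package main

import (
	"fmt"
	"net/url"
	"time"

	"github.com/aws/aws-sdk-go/aws/crr"
)

// staticDiscoverer is a toy Discoverer (illustrative only) that always
// returns one endpoint valid for five minutes.
type staticDiscoverer struct{ raw string }

func (d staticDiscoverer) Discover() (crr.Endpoint, error) {
	u, err := url.Parse(d.raw)
	if err != nil {
		return crr.Endpoint{}, err
	}
	return crr.Endpoint{
		Addresses: crr.WeightedAddresses{
			{URL: u, Expired: time.Now().Add(5 * time.Minute)},
		},
	}, nil
}

func main() {
	cache := crr.NewEndpointCache(10)

	// required=true forces a synchronous Discover() on a cache miss.
	addr, err := cache.Get(staticDiscoverer{"https://cell-1.example.com"}, "tableA", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(addr.URL) // https://cell-1.example.com
}

With required set to true, a cache miss calls Discover() synchronously; with false, discovery happens in a background goroutine and the zero WeightedAddress is returned until it completes.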
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
new file mode 100644
index 000000000..2b088bdbc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
@@ -0,0 +1,132 @@
+package crr
+
+import (
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// Endpoint represents an endpoint used in endpoint discovery.
+type Endpoint struct {
+	Key       string
+	Addresses WeightedAddresses
+}
+
+// WeightedAddresses represents a list of WeightedAddress.
+type WeightedAddresses []WeightedAddress
+
+// WeightedAddress represents an address with a given weight.
+type WeightedAddress struct {
+	URL     *url.URL
+	Expired time.Time
+}
+
+// HasExpired will return whether or not the endpoint has expired; a zero
+// Expired time is treated as already expired.
+func (e WeightedAddress) HasExpired() bool {
+	return e.Expired.Before(time.Now())
+}
+
+// Add will add a given WeightedAddress to the address list of Endpoint.
+func (e *Endpoint) Add(addr WeightedAddress) {
+	e.Addresses = append(e.Addresses, addr)
+}
+
+// Len returns the number of valid endpoints, where valid means the endpoint
+// has not expired.
+func (e *Endpoint) Len() int {
+	validEndpoints := 0
+	for _, endpoint := range e.Addresses {
+		if endpoint.HasExpired() {
+			continue
+		}
+
+		validEndpoints++
+	}
+	return validEndpoints
+}
+
+// GetValidAddress will return a non-expired weighted address, if any.
+func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) {
+	for i := 0; i < len(e.Addresses); i++ {
+		we := e.Addresses[i]
+
+		if we.HasExpired() {
+			e.Addresses = append(e.Addresses[:i], e.Addresses[i+1:]...)
+			i--
+			continue
+		}
+
+		we.URL = cloneURL(we.URL)
+
+		return we, true
+	}
+
+	return WeightedAddress{}, false
+}
+
+// Prune will prune the expired addresses from the endpoint by allocating a new []WeightedAddress.
+// This is not concurrent safe, and should be called from a single owning thread.
+func (e *Endpoint) Prune() bool {
+	validLen := e.Len()
+	if validLen == len(e.Addresses) {
+		return false
+	}
+	wa := make([]WeightedAddress, 0, validLen)
+	for i := range e.Addresses {
+		if e.Addresses[i].HasExpired() {
+			continue
+		}
+		wa = append(wa, e.Addresses[i])
+	}
+	e.Addresses = wa
+	return true
+}
+
+// Discoverer is an interface used to discover which endpoint to hit. This
+// allows for specifics about what parameters need to be used to be contained
+// in the Discoverer implementor.
+type Discoverer interface {
+	Discover() (Endpoint, error)
+}
+
+// BuildEndpointKey will sort the keys in alphabetical order and then retrieve
+// the values in that order. Those values are then concatenated together to form
+// the endpoint key.
+func BuildEndpointKey(params map[string]*string) string {
+	keys := make([]string, len(params))
+	i := 0
+
+	for k := range params {
+		keys[i] = k
+		i++
+	}
+	sort.Strings(keys)
+
+	values := make([]string, len(params))
+	for i, k := range keys {
+		if params[k] == nil {
+			continue
+		}
+
+		values[i] = aws.StringValue(params[k])
+	}
+
+	return strings.Join(values, ".")
+}
+
+func cloneURL(u *url.URL) (clone *url.URL) {
+	clone = &url.URL{}
+
+	*clone = *u
+
+	if u.User != nil {
+		user := *u.User
+		clone.User = &user
+	}
+
+	return clone
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
new file mode 100644
index 000000000..f7b65ac01
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
@@ -0,0 +1,30 @@
+//go:build go1.9
+// +build go1.9
+
+package crr
+
+import (
+	"sync"
+)
+
+type syncMap sync.Map
+
+func newSyncMap() syncMap {
+	return syncMap{}
+}
+
+func (m *syncMap) Load(key interface{}) (interface{}, bool) {
+	return (*sync.Map)(m).Load(key)
+}
+
+func (m *syncMap) Store(key interface{}, value interface{}) {
+	(*sync.Map)(m).Store(key, value)
+}
+
+func (m *syncMap) Delete(key interface{}) {
+	(*sync.Map)(m).Delete(key)
+}
+
+func (m *syncMap) Range(f func(interface{}, interface{}) bool) {
+	(*sync.Map)(m).Range(f)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
new file mode 100644
index 000000000..eb4f6aca2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
@@ -0,0 +1,49 @@
+//go:build !go1.9
+// +build !go1.9
+
+package crr
+
+import (
+	"sync"
+)
+
+type syncMap struct {
+	container map[interface{}]interface{}
+	lock      sync.RWMutex
+}
+
+func newSyncMap() syncMap {
+	return syncMap{
+		container: map[interface{}]interface{}{},
+	}
+}
+
+func (m *syncMap) Load(key interface{}) (interface{}, bool) {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	v, ok := m.container[key]
+	return v, ok
+}
+
+func (m *syncMap) Store(key interface{}, value interface{}) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	m.container[key] = value
+}
+
+func (m *syncMap) Delete(key interface{}) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	delete(m.container, key)
+}
+
+func (m *syncMap) Range(f func(interface{}, interface{}) bool) {
+	for k, v := range m.container {
+		if !f(k, v) {
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 1ec4a1ee8..3e396bb3e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -23,13 +23,17 @@ const (
 	ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific
(Seoul). ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). + ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuSouth2RegionID = "eu-south-2" // Europe (Spain). EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). @@ -157,6 +161,9 @@ var awsPartition = partition{ "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, "ap-southeast-1": region{ Description: "Asia Pacific (Singapore)", }, @@ -166,18 +173,27 @@ var awsPartition = partition{ "ap-southeast-3": region{ Description: "Asia Pacific (Jakarta)", }, + "ap-southeast-4": region{ + Description: "Asia Pacific (Melbourne)", + }, "ca-central-1": region{ Description: "Canada (Central)", }, "eu-central-1": region{ Description: "Europe (Frankfurt)", }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, "eu-north-1": region{ Description: "Europe (Stockholm)", }, "eu-south-1": region{ Description: "Europe (Milan)", }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, "eu-west-1": region{ Description: "Europe (Ireland)", }, @@ -237,6 +253,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -246,6 +265,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -258,12 +280,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -399,6 +427,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -408,6 +439,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -429,12 +463,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -633,6 +673,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, 
endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -897,6 +940,34 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "aoss": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "api.detective": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -1084,6 +1155,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -1108,6 +1187,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -1196,6 +1283,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -1212,6 +1307,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1402,6 +1505,26 @@ var awsPartition = partition{ }, }, }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.elastic-inference": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1593,6 +1716,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -1601,6 +1732,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.iotwireless.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -1621,9 +1760,15 @@ var awsPartition = partition{ }, "api.mediatailor": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1639,6 +1784,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: 
endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -1719,6 +1867,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1960,6 +2111,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1969,6 +2123,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -1981,12 +2138,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2142,6 +2305,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2151,18 +2317,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2339,6 +2514,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2348,18 +2526,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2418,6 +2605,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2771,6 +2961,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2876,6 +3075,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -2919,6 +3121,9 @@ var awsPartition = partition{ }, "appsync": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: 
endpoint{}, @@ -2940,6 +3145,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3020,56 +3228,186 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"athena.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3109,12 +3447,30 @@ var awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -3124,6 +3480,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -3133,6 +3495,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -3142,6 +3510,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -3215,6 +3589,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3224,18 +3601,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3393,6 +3779,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3590,6 +3979,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3678,6 +4070,51 @@ var awsPartition = partition{ }, }, }, + "cases": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "cassandra": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -3807,6 +4244,43 @@ var awsPartition = partition{ }, }, }, + "cleanrooms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "cloud9": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -3894,6 +4368,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3903,6 +4380,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3915,12 +4395,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3975,6 +4461,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4070,6 +4559,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4079,18 +4571,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4247,6 +4748,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -4262,6 +4766,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4336,6 +4843,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + 
Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4345,18 +4855,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -4449,6 +4968,76 @@ var awsPartition = partition{ }, }, }, + "cloudtrail-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "codeartifact": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4542,6 +5131,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4622,6 +5214,17 @@ var awsPartition = partition{ }, }, }, + "codecatalyst": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "codecatalyst.global.api.aws", + }, + }, + }, "codecommit": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4642,12 +5245,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4693,6 +5302,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4793,6 +5405,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -4802,18 +5417,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ 
+ Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5252,6 +5876,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5352,6 +5979,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5830,6 +6460,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5839,18 +6472,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -5982,6 +6624,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -6264,6 +6909,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6669,6 +7317,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6690,12 +7341,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6750,6 +7407,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6966,6 +7626,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6975,18 +7638,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7133,18 +7805,27 @@ var awsPartition = partition{ 
endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7154,6 +7835,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7194,6 +7878,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7203,6 +7890,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7236,12 +7926,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7540,6 +8236,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7561,12 +8260,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7621,6 +8326,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7690,6 +8398,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7699,6 +8410,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7720,12 +8434,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7847,6 +8567,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7856,6 +8579,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7868,12 +8594,18 @@ var awsPartition = partition{ endpointKey{ 
Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7928,6 +8660,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8003,6 +8738,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8012,6 +8750,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -8024,12 +8765,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8181,6 +8928,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8190,18 +8940,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8347,6 +9106,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8362,12 +9124,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8413,6 +9181,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8477,6 +9248,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8486,18 +9260,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: 
endpoint{}, @@ -8785,6 +9568,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8830,6 +9622,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -8848,6 +9649,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8929,6 +9739,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -8974,6 +9793,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-north-1", }: endpoint{ @@ -8992,6 +9820,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-west-1", }: endpoint{ @@ -9019,6 +9856,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -9073,6 +9919,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9154,6 +10009,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9163,18 +10021,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: 
"eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9293,6 +10160,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9302,6 +10172,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9316,12 +10189,18 @@ var awsPartition = partition{ }: endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9477,6 +10356,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9836,6 +10718,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9845,18 +10730,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9978,6 +10872,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9987,18 +10884,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10198,6 +11104,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10213,12 +11122,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -10264,6 +11179,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10381,6 +11299,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: 
"fms-fips.ap-southeast-2.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10609,6 +11530,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10847,15 +11771,33 @@ var awsPartition = partition{ }, "fsx": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10868,12 +11810,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -10910,6 +11861,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-west-2", }: endpoint{ @@ -10937,6 +11897,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -10946,6 +11915,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "prod-ca-central-1", }: endpoint{ @@ -11000,6 +11975,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-west-2", }: endpoint{ @@ -11018,6 +12011,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11036,6 +12032,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -11116,6 +12121,9 @@ var awsPartition = partition{ }, "gamesparks": service{ Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11126,12 +12134,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11141,6 +12155,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11388,6 +12408,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11673,6 +12696,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -11688,12 +12714,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11703,6 +12735,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11941,12 +12976,18 @@ var awsPartition = partition{ }, "identitystore": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -11956,6 +12997,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11965,12 +13009,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -11998,6 +13051,112 @@ var awsPartition = partition{ }, }, }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "ingest-fips-us-east-1", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-east-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + 
}, + endpointKey{ + Region: "ingest-fips-us-west-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "inspector": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12159,6 +13318,156 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "internetmonitor.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "internetmonitor.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "internetmonitor.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "internetmonitor.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "internetmonitor.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "internetmonitor.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: 
"internetmonitor.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "internetmonitor.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "internetmonitor.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "internetmonitor.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "internetmonitor.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "internetmonitor.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "internetmonitor.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "internetmonitor.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "internetmonitor.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "internetmonitor.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "internetmonitor.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "internetmonitor.us-west-2.api.aws", + }, + }, + }, "iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -12255,6 +13564,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12609,6 +13921,16 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "iotroborunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -13042,6 +14364,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13116,6 +14441,12 @@ var awsPartition = partition{ }, "kendra": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13184,6 +14515,146 @@ var awsPartition = partition{ }, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "kendra-ranking.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: 
"kendra-ranking.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "kendra-ranking.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "kendra-ranking.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "kendra-ranking.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "kendra-ranking.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "kendra-ranking.us-west-2.api.aws", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13204,6 +14675,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13213,18 +14687,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13367,6 +14850,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13441,6 +14927,15 @@ var awsPartition = partition{ }, "kms": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "af-south-1", }: endpoint{}, @@ -13549,6 +15044,24 @@ var awsPartition = 
partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -13603,6 +15116,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -13639,6 +15170,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -13675,6 +15224,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13883,6 +15450,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14040,6 +15610,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -14067,6 +15646,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ap-southeast-3.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14085,6 +15673,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14103,6 +15700,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + 
endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14169,6 +15775,12 @@ var awsPartition = partition{ endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14379,14 +15991,8 @@ var awsPartition = partition{ }, }, }, - "license-manager-user-subscriptions": service{ + "license-manager-linux-subscriptions": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -14414,9 +16020,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14429,7 +16032,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -14438,7 +16041,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -14447,7 +16050,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -14456,15 +16059,12 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -14475,7 +16075,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -14484,7 +16084,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -14493,7 +16093,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -14502,57 +16102,11 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: 
"license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", }, }, }, - "lightsail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "logs": service{ + "license-manager-user-subscriptions": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -14578,21 +16132,206 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + }, 
+ }, + }, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14756,27 +16495,101 @@ var awsPartition = partition{ }, "m2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: 
endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "machinelearning": service{ @@ -15085,6 +16898,9 @@ var awsPartition = partition{ }, "mediaconvert": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -15505,12 +17321,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips", }: endpoint{ @@ -15588,6 +17410,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15597,18 +17422,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -15641,6 +17475,76 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "mgh": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15716,6 +17620,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"mgn-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -15725,15 +17665,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + }, }, }, "migrationhub-orchestrator": service{ @@ -15928,6 +17892,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15937,18 +17904,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16127,6 +18103,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16358,6 +18337,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -16430,6 +18412,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16510,8 +18495,101 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "oidc": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, endpointKey{ Region: "ap-east-1", }: endpoint{ @@ -16568,6 +18646,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -16656,6 +18742,14 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -17044,6 +19138,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17053,18 +19150,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17123,7 +19229,21 @@ var awsPartition = partition{ }: endpoint{}, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Hostname: "pinpoint.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17133,6 +19253,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17142,6 +19271,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -17168,6 +19306,23 
@@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "pinpoint.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -17187,6 +19342,79 @@ var awsPartition = partition{ }, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17310,6 +19538,14 @@ var awsPartition = partition{ }, "portal.sso": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, endpointKey{ Region: "ap-east-1", }: endpoint{ @@ -17366,6 +19602,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -17454,6 +19698,14 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -17528,9 +19780,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -17656,21 +19926,24 @@ var awsPartition = partition{ endpointKey{ 
Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "api", - }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -17705,6 +19978,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17714,6 +19990,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17726,12 +20005,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17786,6 +20071,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17850,6 +20138,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17859,6 +20150,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17871,12 +20165,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -17998,6 +20298,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18007,6 +20310,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18028,12 +20334,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18390,6 +20702,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18399,6 +20714,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18411,12 +20729,18 @@ var awsPartition = partition{ 
endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18872,6 +21196,131 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "resource-explorer-2.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "resource-explorer-2.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "resource-explorer-2.eu-west-3.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "resource-explorer-2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "resource-explorer-2.us-west-2.api.aws", + }, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18892,6 +21341,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -18901,18 +21353,27 @@ var awsPartition = 
partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18958,6 +21419,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19203,6 +21667,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19425,6 +21892,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19589,6 +22059,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -19624,6 +22103,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", + }, endpointKey{ Region: "aws-global", }: endpoint{ @@ -19663,6 +22151,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.eu-central-1.amazonaws.com", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -19681,6 +22178,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3.dualstack.eu-south-1.amazonaws.com", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -20426,6 +22932,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -20517,6 +23026,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sagemaker-geospatial": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "savingsplans": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -20531,6 +23047,37 @@ var awsPartition = partition{ }, }, }, + "scheduler": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "schemas": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20642,6 +23189,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20651,6 +23201,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -20672,12 +23225,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -20856,6 +23415,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20900,6 +23462,31 @@ var awsPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -21230,6 +23817,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21279,33 +23869,102 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.af-south-1.amazonaws.com", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-east-1.amazonaws.com", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-1.amazonaws.com", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-2.amazonaws.com", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-3.amazonaws.com", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-2.amazonaws.com", + }, endpointKey{ Region: 
"ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-1.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, @@ -21324,27 +23983,102 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-2.amazonaws.com", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-north-1.amazonaws.com", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-2.amazonaws.com", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-south-1.amazonaws.com", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.sa-east-1.amazonaws.com", + }, endpointKey{ Region: "servicediscovery", }: endpoint{ @@ -21375,6 +24109,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -21393,6 +24133,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + 
endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -21411,6 +24157,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -21429,6 +24181,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -21501,6 +24259,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21641,6 +24402,34 @@ var awsPartition = partition{ }, }, }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21782,6 +24571,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21791,12 +24586,51 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + }, }, }, "snowball": service{ @@ -22131,6 +24965,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22140,18 +24977,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: 
"eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22270,6 +25116,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22279,18 +25128,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22406,6 +25264,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22415,6 +25276,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -22427,12 +25291,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22586,8 +25456,81 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "ssm-sap": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "sso": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -22609,6 +25552,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: 
endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -22642,6 +25588,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -22667,6 +25616,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22676,18 +25628,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22830,12 +25791,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -22854,6 +25821,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22962,6 +25932,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -22971,18 +25944,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23045,6 +26027,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23054,6 +26039,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "aws-global", }: endpoint{ @@ -23068,12 +26056,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23212,6 +26206,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23221,18 +26218,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: 
"ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23345,6 +26351,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23354,18 +26363,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23478,6 +26496,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -23487,18 +26508,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -23967,6 +26997,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24185,6 +27218,82 @@ var awsPartition = partition{ }, }, }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + }, + 
endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "voiceid": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24196,22 +27305,46 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, Deprecated: boxedTrue, }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ - + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, Deprecated: boxedTrue, }, endpointKey{ @@ -24220,14 +27353,18 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, endpointKey{ Region: "us-west-2", Variant: fipsVariant, - }: endpoint{}, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, }, }, "waf": service{ @@ -24707,6 +27844,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -24761,6 +27907,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "waf-regional.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -25283,6 +28446,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-south-1", }: endpoint{ @@ -25337,6 +28509,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "wafv2.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -25513,12 +28702,50 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", 
}: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ui-ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "ui-us-east-1", + }: endpoint{}, + endpointKey{ + Region: "ui-us-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "workdocs": service{ @@ -25718,6 +28945,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25727,18 +28957,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -26055,9 +29294,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "autoscaling": service{ @@ -26315,6 +29566,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26707,6 +29968,31 @@ var awscnPartition = partition{ }, }, }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -26777,6 +30063,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26797,6 +30108,13 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26881,6 +30199,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "monitoring": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -27004,6 +30332,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27014,6 +30367,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -27195,6 +30558,33 @@ var awscnPartition = partition{ }, }, "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -27890,6 +31280,9 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -28150,6 +31543,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -28159,6 +31558,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-west-1.api.aws", + }, 
endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -28341,6 +31746,21 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudformation": service{ @@ -28353,6 +31773,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -28361,6 +31799,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudhsm": service{ @@ -28388,6 +31844,14 @@ var awsusgovPartition = partition{ }, }, "cloudtrail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "fips-us-gov-east-1", @@ -28671,6 +32135,26 @@ var awsusgovPartition = partition{ }, }, }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "config": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -28726,6 +32210,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "data-ats.iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -28816,9 +32310,24 @@ var awsusgovPartition = partition{ }, "databrew": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + }, }, }, "datasync": service{ @@ -29094,6 +32603,15 @@ var 
awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-east-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -29102,6 +32620,15 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-west-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "ecs": service{ @@ -29247,6 +32774,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -29255,6 +32800,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "elasticfilesystem": service{ @@ -29421,6 +32984,16 @@ var awsusgovPartition = partition{ }, }, }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29692,21 +33265,43 @@ var awsusgovPartition = partition{ "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "glacier.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Hostname: "glacier.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, }, }, }, @@ -29777,36 +33372,38 @@ var awsusgovPartition = partition{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + Hostname: "greengrass.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, Deprecated: boxedTrue, }, endpointKey{ - Region: 
"us-gov-east-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "greengrass.us-gov-east-1.amazonaws.com", + Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, + Hostname: "greengrass.us-gov-east-1.amazonaws.com", }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, }: endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, }, }, }, @@ -30000,6 +33597,28 @@ var awsusgovPartition = partition{ }, }, }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "inspector": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30040,6 +33659,41 @@ var awsusgovPartition = partition{ }, }, }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-west-1.api.aws", + }, + }, + }, "iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -30241,8 +33895,51 @@ var awsusgovPartition = partition{ }, }, }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-west-1.api.aws", + }, + }, + }, "kinesis": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{ @@ -30251,6 +33948,15 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + 
endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -30259,6 +33965,15 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "kinesisanalytics": service{ @@ -30502,12 +34217,22 @@ var awsusgovPartition = partition{ "mediaconvert": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", }, }, }, @@ -30568,6 +34293,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -30822,20 +34557,40 @@ var awsusgovPartition = partition{ "outposts": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "outposts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "outposts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", }, }, }, @@ -30851,6 +34606,16 @@ var awsusgovPartition = partition{ }, }, }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "pinpoint": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -30950,6 +34715,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -30958,6 +34741,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "rbin": service{ @@ -31135,6 +34936,31 @@ var awsusgovPartition = partition{ }, }, }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-west-1.api.aws", + }, + }, + }, "resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -31279,6 +35105,9 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -31623,21 +35452,45 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, }: endpoint{ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, }: endpoint{ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, @@ -31731,6 +35584,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -31749,6 +35608,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-west-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -31859,9 +35724,24 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + }, }, }, "snowball": service{ @@ -32287,6 +36167,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -32295,6 +36193,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "synthetics": service{ @@ -32971,6 +36887,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "dynamodb": service{ @@ -33045,6 +36964,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, @@ -33054,6 +36982,15 @@ var awsisoPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "elasticloadbalancing": service{ @@ -33105,6 +37042,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "glacier": service{ @@ -33119,6 +37059,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "health": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33240,6 +37187,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33262,6 +37216,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "rds": service{ @@ -33335,6 +37292,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "snowball": service{ @@ -33373,6 +37333,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "states": service{ @@ -33440,6 +37403,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "tagging": service{ @@ -33485,6 +37451,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: 
endpoint{}, }, }, }, @@ -33611,6 +37580,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -33885,6 +37861,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33934,6 +37917,13 @@ var awsisobPartition = partition{ }, }, }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "s3": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index c09688d1c..e9ac366d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.109" +const SDKVersion = "1.44.217" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go index ebcbc2b40..34fea49ca 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -1,9 +1,8 @@ package shareddefaults import ( - "os" + "os/user" "path/filepath" - "runtime" ) // SharedCredentialsFilename returns the SDK's default file path @@ -31,10 +30,17 @@ func SharedConfigFilename() string { // UserHomeDir returns the home directory for the user the process is // running under. 
func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") + var home string + + home = userHomeDir() + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir } - // *nix - return os.Getenv("HOME") + return home } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go new file mode 100644 index 000000000..eb298ae0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go @@ -0,0 +1,18 @@ +//go:build !go1.12 +// +build !go1.12 + +package shareddefaults + +import ( + "os" + "runtime" +) + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go new file mode 100644 index 000000000..51541b508 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go @@ -0,0 +1,13 @@ +//go:build go1.12 +// +build go1.12 + +package shareddefaults + +import ( + "os" +) + +func userHomeDir() string { + home, _ := os.UserHomeDir() + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 2aec80661..12e814ddf 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -4,7 +4,6 @@ package jsonutil import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "math" "reflect" @@ -16,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + var timeType = reflect.ValueOf(time.Time{}).Type() var byteSliceType = reflect.ValueOf([]byte{}).Type() @@ -211,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) case reflect.Float64: f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + switch { + case math.IsNaN(f): + writeString(floatNaN, buf) + case math.IsInf(f, 1): + writeString(floatInf, buf) + case math.IsInf(f, -1): + writeString(floatNegInf, buf) + default: + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) default: switch converted := value.Interface().(type) { case time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index 8b2c9bbeb..f9334879b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "math" "math/big" "reflect" "strings" @@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag return err } value.Set(reflect.ValueOf(v)) + case *float64: + // 
These are regular strings when parsed by encoding/json's unmarshaler. + switch { + case strings.EqualFold(d, floatNaN): + value.Set(reflect.ValueOf(aws.Float64(math.NaN()))) + case strings.EqualFold(d, floatInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(1)))) + case strings.EqualFold(d, floatNegInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1)))) + default: + return fmt.Errorf("unknown JSON number value: %s", d) + } default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go index c0c52e2db..9c1ccde54 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -13,17 +13,46 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) +const ( + awsQueryError = "x-amzn-query-error" + // A valid header example - "x-amzn-query-error": ";" + awsQueryErrorPartsCount = 2 +) + // UnmarshalTypedError provides unmarshaling errors API response errors // for both typed and untyped errors. type UnmarshalTypedError struct { - exceptions map[string]func(protocol.ResponseMetadata) error + exceptions map[string]func(protocol.ResponseMetadata) error + queryExceptions map[string]func(protocol.ResponseMetadata, string) error } // NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the // set of exception names to the error unmarshalers func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { return &UnmarshalTypedError{ - exceptions: exceptions, + exceptions: exceptions, + queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{}, + } +} + +// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError applying options to the UnmarshalTypedError +// before returning it +func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError { + unmarshaledError := NewUnmarshalTypedError(exceptions) + for _, fn := range optFns { + fn(unmarshaledError) + } + return unmarshaledError +} + +// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions. +// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found. +// See also [awsQueryCompatible trait] +// +// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait +func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) { + return func(typedError *UnmarshalTypedError) { + typedError.queryExceptions = queryExceptions } } @@ -50,18 +79,32 @@ func (u *UnmarshalTypedError) UnmarshalError( code := codeParts[len(codeParts)-1] msg := jsonErr.Message + queryCodeParts := queryCodeParts(resp, u) + if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value + // If query-compatible exceptions are found and query-error-header is found, + // then use associated constructor to get exception with query error code. 
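Note on the awsQueryCompatible plumbing above: the x-amzn-query-error header value carries two ';'-separated parts -- the query-compatible error code, then the fault type -- which is exactly what awsQueryErrorPartsCount = 2 guards. A minimal, self-contained sketch of that split, using a hypothetical header value rather than anything taken from this patch:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        // Hypothetical response carrying a query-compatible error header.
        resp := &http.Response{Header: http.Header{}}
        resp.Header.Set("x-amzn-query-error", "AWS.SimpleQueueService.NonExistentQueue;Sender")

        // Mirrors queryCodeParts below: split only when the header is present.
        parts := strings.Split(resp.Header.Get("x-amzn-query-error"), ";")
        if len(parts) == 2 {
            // parts[0] overrides the modeled error code; parts[1] names the fault.
            fmt.Printf("code=%s fault=%s\n", parts[0], parts[1])
        }
    }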
+ // + // If exception code is known, use associated constructor to get a value // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) + var v error + queryErrFn, queryExceptionsFound := u.queryExceptions[code] + if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound { + v = queryErrFn(respMeta, queryCodeParts[0]) + } else { + v = fn(respMeta) + } err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) if err != nil { return nil, err } - return v, nil } + if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 { + code = queryCodeParts[0] + } + // fallback to unmodeled generic exceptions return awserr.NewRequestFailure( awserr.New(code, msg, nil), @@ -70,6 +113,16 @@ func (u *UnmarshalTypedError) UnmarshalError( ), nil } +// A valid header example - "x-amzn-query-error": ";" +func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string { + queryCodeHeader := resp.Header.Get(awsQueryError) + var queryCodeParts []string + if queryCodeHeader != "" && len(u.queryExceptions) > 0 { + queryCodeParts = strings.Split(queryCodeHeader, ";") + } + return queryCodeParts +} + // UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc // protocol request errors var UnmarshalErrorHandler = request.NamedHandler{ diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 75866d012..058334053 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -3,6 +3,7 @@ package queryutil import ( "encoding/base64" "fmt" + "math" "net/url" "reflect" "sort" @@ -13,6 +14,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Parse parses an object i and fills a url.Values object. The isEC2 flag // indicates if this is the EC2 Query sub-protocol. 
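The non-finite float handling added here recurs across the query, REST, JSON, and XML marshalers in this patch: NaN and the infinities now serialize to the sentinel strings "NaN", "Infinity", and "-Infinity" instead of failing (or, in the JSON case, returning an UnsupportedValueError). A standalone sketch of the mapping those switches implement:

    package main

    import (
        "fmt"
        "math"
        "strconv"
    )

    // formatFloat mirrors the marshal-side switches: non-finite values map to
    // sentinel strings, finite values to their shortest decimal form.
    func formatFloat(f float64) string {
        switch {
        case math.IsNaN(f):
            return "NaN"
        case math.IsInf(f, 1):
            return "Infinity"
        case math.IsInf(f, -1):
            return "-Infinity"
        default:
            return strconv.FormatFloat(f, 'f', -1, 64)
        }
    }

    func main() {
        for _, f := range []float64{math.NaN(), math.Inf(1), math.Inf(-1), 3.14} {
            fmt.Println(formatFloat(f)) // NaN, Infinity, -Infinity, 3.14
        }
    }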
func Parse(body url.Values, i interface{}, isEC2 bool) error { @@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta case int: v.Set(name, strconv.Itoa(value)) case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + var str string + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + v.Set(name, str) case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + asFloat64 := float64(value) + var str string + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + v.Set(name, str) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" format := tag.Get("timestampFormat") diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 63f66af2c..1d273ff0e 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "fmt" "io" + "math" "net/http" "net/url" "path" @@ -20,6 +21,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -302,7 +309,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) case int64: str = strconv.FormatInt(value, 10) case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index cdef403e2..79fcf1699 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "reflect" "strconv" @@ -231,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&i)) case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err + var f float64 + switch { + case strings.EqualFold(header, floatNaN): + f = math.NaN() + case strings.EqualFold(header, floatInf): + f = math.Inf(1) + case strings.EqualFold(header, floatNegInf): + f = math.Inf(-1) + default: + var err error + f, err = strconv.ParseFloat(header, 64) + if err != nil { + return err + } } v.Set(reflect.ValueOf(&f)) case *time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 2fbb93ae7..58c12bd8c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -5,6 +5,7 @@ import ( "encoding/base64" 
"encoding/xml" "fmt" + "math" "reflect" "sort" "strconv" @@ -14,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // BuildXML will serialize params into an xml.Encoder. Error will be returned // if the serialization of any of the params or nested values fails. func BuildXML(params interface{}, e *xml.Encoder) error { @@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect // Error will be returned if the value type is unsupported. func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { var str string + switch converted := value.Interface().(type) { case string: str = converted @@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case int: str = strconv.Itoa(converted) case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) + switch { + case math.IsNaN(converted): + str = floatNaN + case math.IsInf(converted, 1): + str = floatInf + case math.IsInf(converted, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(converted, 'f', -1, 64) + } case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently. + asFloat64 := float64(converted) + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 107c053f8..44a580a94 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/xml" "fmt" "io" + "math" "reflect" "strconv" "strings" @@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err + var v float64 + switch { + case strings.EqualFold(node.Text, floatNaN): + v = math.NaN() + case strings.EqualFold(node.Text, floatInf): + v = math.Inf(1) + case strings.EqualFold(node.Text, floatNegInf): + v = math.Inf(-1) + default: + var err error + v, err = strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } } r.Set(reflect.ValueOf(&v)) case *time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go new file mode 100644 index 000000000..2539e6f01 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -0,0 +1,27190 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package dynamodb + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/crr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchExecuteStatement = "BatchExecuteStatement" + +// BatchExecuteStatementRequest generates a "aws/request.Request" representing the +// client's request for the BatchExecuteStatement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchExecuteStatement for more information on using the BatchExecuteStatement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchExecuteStatementRequest method. +// req, resp := client.BatchExecuteStatementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement +func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInput) (req *request.Request, output *BatchExecuteStatementOutput) { + op := &request.Operation{ + Name: opBatchExecuteStatement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchExecuteStatementInput{} + } + + output = &BatchExecuteStatementOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchExecuteStatement API operation for Amazon DynamoDB. +// +// This operation allows you to perform batch reads or writes on data stored +// in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement +// must specify an equality condition on all key attributes. This enforces that +// each SELECT statement in a batch returns at most a single item. +// +// The entire batch must consist of either read statements or write statements, +// you cannot mix both in one batch. +// +// A HTTP 200 response does not mean that all statements in the BatchExecuteStatement +// succeeded. Error details for individual statements can be found under the +// Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error) +// field of the BatchStatementResponse for each statement. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchExecuteStatement for usage and error information. +// +// Returned Error Types: +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. 
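The constraints documented above -- every read statement pins the full primary key, a batch is all reads or all writes, and an HTTP 200 can still carry per-statement errors -- suggest the following hypothetical caller sketch (the table name, key attributes, and values are invented, not part of this patch):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // An all-read batch; each SELECT pins the full primary key.
        out, err := svc.BatchExecuteStatement(&dynamodb.BatchExecuteStatementInput{
            Statements: []*dynamodb.BatchStatementRequest{
                {Statement: aws.String(`SELECT * FROM "Music" WHERE Artist = 'A' AND SongTitle = 'B'`)},
            },
        })
        if err != nil {
            fmt.Println("transport/SDK error:", err)
            return
        }
        // HTTP 200 does not mean every statement succeeded; check each response.
        for i, r := range out.Responses {
            if r.Error != nil {
                fmt.Printf("statement %d failed: %s\n", i, aws.StringValue(r.Error.Message))
            }
        }
    }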
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement +func (c *DynamoDB) BatchExecuteStatement(input *BatchExecuteStatementInput) (*BatchExecuteStatementOutput, error) { + req, out := c.BatchExecuteStatementRequest(input) + return out, req.Send() +} + +// BatchExecuteStatementWithContext is the same as BatchExecuteStatement with the addition of +// the ability to pass a context and additional request options. +// +// See BatchExecuteStatement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchExecuteStatementWithContext(ctx aws.Context, input *BatchExecuteStatementInput, opts ...request.Option) (*BatchExecuteStatementOutput, error) { + req, out := c.BatchExecuteStatementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchGetItem = "BatchGetItem" + +// BatchGetItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetItem for more information on using the BatchGetItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the BatchGetItemRequest method. +// req, resp := client.BatchGetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem +func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { + op := &request.Operation{ + Name: opBatchGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"RequestItems"}, + OutputTokens: []string{"UnprocessedKeys"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &BatchGetItemInput{} + } + + output = &BatchGetItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// BatchGetItem API operation for Amazon DynamoDB. +// +// The BatchGetItem operation returns the attributes of one or more items from +// one or more tables. You identify requested items by primary key. 
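+//
+// A minimal request sketch, assuming a configured client; the table, key and
+// attribute names are illustrative:
+//
+// out, err := client.BatchGetItem(&dynamodb.BatchGetItemInput{
+//     RequestItems: map[string]*dynamodb.KeysAndAttributes{
+//         "Music": {
+//             Keys: []map[string]*dynamodb.AttributeValue{
+//                 {"Artist": {S: aws.String("Acme")}, "SongTitle": {S: aws.String("Song")}},
+//             },
+//         },
+//     },
+// })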
+// +// A single operation can retrieve up to 16 MB of data, which can contain as +// many as 100 items. BatchGetItem returns a partial result if the response +// size limit is exceeded, the table's provisioned throughput is exceeded, or +// an internal processing failure occurs. If a partial result is returned, the +// operation returns a value for UnprocessedKeys. You can use this value to +// retry the operation starting with the next item to get. +// +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." +// +// For example, if you ask to retrieve 100 items, but each individual item is +// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB +// limit). It also returns an appropriate UnprocessedKeys value so you can get +// the next page of results. If desired, your application can include its own +// logic to assemble the pages of results into one dataset. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. +// If at least one of the items is successfully processed, then BatchGetItem +// completes successfully, while returning the keys of the unread items in UnprocessedKeys. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every table +// in the request. If you want strongly consistent reads instead, you can set +// ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem retrieves items in parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include +// the primary key values for the items in your request in the ProjectionExpression +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to +// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchGetItem for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. 
Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem +func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + return out, req.Send() +} + +// BatchGetItemWithContext is the same as BatchGetItem with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemInput, opts ...request.Option) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// BatchGetItemPages iterates over the pages of a BatchGetItem operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See BatchGetItem method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a BatchGetItem operation. +// pageNum := 0 +// err := client.BatchGetItemPages(params, +// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error { + return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// BatchGetItemPagesWithContext same as BatchGetItemPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *BatchGetItemInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.BatchGetItemRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+			return req, nil
+		},
+	}
+
+	for p.Next() {
+		if !fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) {
+			break
+		}
+	}
+
+	return p.Err()
+}
+
+const opBatchWriteItem = "BatchWriteItem"
+
+// BatchWriteItemRequest generates a "aws/request.Request" representing the
+// client's request for the BatchWriteItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See BatchWriteItem for more information on using the BatchWriteItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the BatchWriteItemRequest method.
+// req, resp := client.BatchWriteItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem
+func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) {
+	op := &request.Operation{
+		Name:       opBatchWriteItem,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &BatchWriteItemInput{}
+	}
+
+	output = &BatchWriteItemOutput{}
+	req = c.newRequest(op, input, output)
+	// if custom endpoint for the request is set to a non empty string,
+	// we skip the endpoint discovery workflow.
+	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
+		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+			de := discovererDescribeEndpoints{
+				Required:      false,
+				EndpointCache: c.endpointCache,
+				Params: map[string]*string{
+					"op": aws.String(req.Operation.Name),
+				},
+				Client: c,
+			}
+
+			for k, v := range de.Params {
+				if v == nil {
+					delete(de.Params, k)
+				}
+			}
+
+			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+				Name: "crr.endpointdiscovery",
+				Fn:   de.Handler,
+			})
+		}
+	}
+	return
+}
+
+// BatchWriteItem API operation for Amazon DynamoDB.
+//
+// The BatchWriteItem operation puts or deletes multiple items in one or more
+// tables. A single call to BatchWriteItem can transmit up to 16 MB of data over
+// the network, consisting of up to 25 item put or delete operations. While
+// individual items can be up to 400 KB once stored, it's important to note
+// that an item's representation might be greater than 400 KB while being sent
+// in DynamoDB's JSON format for the API call. For more details on this distinction,
+// see Naming Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html).
+//
+// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation
+// on an existing item, that item's values will be overwritten by the operation
+// and it will appear as though it was updated. To update items, we recommend you
+// use the UpdateItem action.
+//
+// The individual PutItem and DeleteItem operations specified in BatchWriteItem
+// are atomic; however, BatchWriteItem as a whole is not. If any requested operations
+// fail because the table's provisioned throughput is exceeded or an internal
+// processing failure occurs, the failed operations are returned in the UnprocessedItems
+// response parameter.
You can investigate and optionally resend the requests. +// Typically, you would call BatchWriteItem in a loop. Each iteration would +// check for unprocessed items and submit a new BatchWriteItem request with +// those unprocessed items until all items have been processed. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem, you can efficiently write or delete large amounts of +// data, such as from Amazon EMR, or copy data from another database into DynamoDB. +// In order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. +// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, +// you must update or delete the specified items one at a time. In both situations, +// BatchWriteItem performs the specified put and delete operations in parallel, +// giving you the power of the thread pool approach without having to introduce +// complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed +// in parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// +// - One or more tables specified in the BatchWriteItem request does not +// exist. +// +// - Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. +// +// - You try to perform multiple operations on the same item in the same +// BatchWriteItem request. For example, you cannot put and delete the same +// item in the same BatchWriteItem request. +// +// - Your request contains at least two items with identical hash and range +// keys (which essentially is two put operations). +// +// - There are more than 25 requests in the batch. +// +// - Any individual item in a batch exceeds 400 KB. +// +// - The total request size exceeds 16 MB. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
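+//
+// A sketch of the retry loop described above, assuming a configured client
+// and a prepared requests map; names are illustrative, and a production loop
+// should back off exponentially between iterations:
+//
+// items := map[string][]*dynamodb.WriteRequest{ /* ... */ }
+// for len(items) > 0 {
+//     out, err := client.BatchWriteItem(&dynamodb.BatchWriteItemInput{RequestItems: items})
+//     if err != nil {
+//         break // inspect err; see the error types below
+//     }
+//     items = out.UnprocessedItems
+// }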
+// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchWriteItem for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem +func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + return out, req.Send() +} + +// BatchWriteItemWithContext is the same as BatchWriteItem with the addition of +// the ability to pass a context and additional request options. +// +// See BatchWriteItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchWriteItemWithContext(ctx aws.Context, input *BatchWriteItemInput, opts ...request.Option) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBackup = "CreateBackup" + +// CreateBackupRequest generates a "aws/request.Request" representing the +// client's request for the CreateBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBackup for more information on using the CreateBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateBackupRequest method. 
+// req, resp := client.CreateBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup +func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) { + op := &request.Operation{ + Name: opCreateBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBackupInput{} + } + + output = &CreateBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// CreateBackup API operation for Amazon DynamoDB. +// +// Creates a backup for an existing table. +// +// Each time you create an on-demand backup, the entire table data is backed +// up. There is no limit to the number of on-demand backups that can be taken. +// +// When you create an on-demand backup, a time marker of the request is cataloged, +// and the backup is created asynchronously, by applying all changes until the +// time of the request to the last full table snapshot. Backup requests are +// processed instantaneously and become available for restore within minutes. +// +// You can call CreateBackup at a maximum rate of 50 times per second. +// +// All backups in DynamoDB work without consuming any provisioned throughput +// on the table. +// +// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed +// to contain all data committed to the table up to 14:24:00, and data committed +// after 14:26:00 will not be. The backup might contain data modifications made +// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency. +// +// Along with data, the following are also included on the backups: +// +// - Global secondary indexes (GSIs) +// +// - Local secondary indexes (LSIs) +// +// - Streams +// +// - Provisioned read and write capacity +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateBackup for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. +// +// - ContinuousBackupsUnavailableException +// Backups have not yet been enabled for this table. +// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. 
+//
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// For most purposes, up to 500 simultaneous table operations are allowed per
+// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
+// RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+// When you are creating a table with one or more secondary indexes, you can
+// have up to 250 such requests running at a time. However, if the table or
+// index specifications are complex, then DynamoDB might temporarily reduce
+// the number of concurrent operations.
+//
+// When importing into DynamoDB, up to 50 simultaneous import table operations
+// are allowed per account.
+//
+// There is a soft account quota of 2,500 tables.
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup
+func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) {
+	req, out := c.CreateBackupRequest(input)
+	return out, req.Send()
+}
+
+// CreateBackupWithContext is the same as CreateBackup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBackup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) {
+	req, out := c.CreateBackupRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateGlobalTable = "CreateGlobalTable"
+
+// CreateGlobalTableRequest generates a "aws/request.Request" representing the
+// client's request for the CreateGlobalTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateGlobalTable for more information on using the CreateGlobalTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateGlobalTableRequest method.
+// req, resp := client.CreateGlobalTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable
+func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) {
+	op := &request.Operation{
+		Name:       opCreateGlobalTable,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateGlobalTableInput{}
+	}
+
+	output = &CreateGlobalTableOutput{}
+	req = c.newRequest(op, input, output)
+	// if custom endpoint for the request is set to a non empty string,
+	// we skip the endpoint discovery workflow.
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// CreateGlobalTable API operation for Amazon DynamoDB. +// +// Creates a global table from an existing table. A global table creates a replication +// relationship between two or more DynamoDB tables with the same table name +// in the provided Regions. +// +// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// when creating new global tables, as it provides greater flexibility, higher +// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To +// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// +// If you want to add a new replica table to a global table, each of the following +// conditions must be true: +// +// - The table must have the same primary key as all of the other replicas. +// +// - The table must have the same name as all of the other replicas. +// +// - The table must have DynamoDB Streams enabled, with the stream containing +// both the new and the old images of the item. +// +// - None of the replica tables in the global table can contain any data. +// +// If global secondary indexes are specified, then the following conditions +// must also be met: +// +// - The global secondary indexes must have the same name. +// +// - The global secondary indexes must have the same hash key and sort key +// (if present). +// +// If local secondary indexes are specified, then the following conditions must +// also be met: +// +// - The local secondary indexes must have the same name. +// +// - The local secondary indexes must have the same hash key and sort key +// (if present). +// +// Write capacity settings should be set consistently across your replica tables +// and secondary indexes. DynamoDB strongly recommends enabling auto scaling +// to manage the write capacity settings for all of your global tables replicas +// and indexes. +// +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should +// also provision equal replicated write capacity units to matching secondary +// indexes across your global table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateGlobalTable for usage and error information. 
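+//
+// A minimal sketch, assuming a configured client and identical, empty tables
+// named "Music" (with Streams enabled as described above) already existing in
+// both Regions; all names are illustrative:
+//
+// out, err := client.CreateGlobalTable(&dynamodb.CreateGlobalTableInput{
+//     GlobalTableName: aws.String("Music"),
+//     ReplicationGroup: []*dynamodb.Replica{
+//         {RegionName: aws.String("us-east-1")},
+//         {RegionName: aws.String("us-west-2")},
+//     },
+// })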
+// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableAlreadyExistsException +// The specified global table already exists. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable +func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) { + req, out := c.CreateGlobalTableRequest(input) + return out, req.Send() +} + +// CreateGlobalTableWithContext is the same as CreateGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateGlobalTableWithContext(ctx aws.Context, input *CreateGlobalTableInput, opts ...request.Option) (*CreateGlobalTableOutput, error) { + req, out := c.CreateGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTable = "CreateTable" + +// CreateTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTable for more information on using the CreateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTableRequest method. 
+// req, resp := client.CreateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable +func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { + op := &request.Operation{ + Name: opCreateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTableInput{} + } + + output = &CreateTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// CreateTable API operation for Amazon DynamoDB. +// +// The CreateTable operation adds a new table to your account. In an Amazon +// Web Services account, table names must be unique within each Region. That +// is, you can have two tables with same name if you create the tables in different +// Regions. +// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING. After +// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of +// the CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table +// with secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable action to check the table status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. 
+// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable +func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + return out, req.Send() +} + +// CreateTableWithContext is the same as CreateTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBackup = "DeleteBackup" + +// DeleteBackupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBackup for more information on using the DeleteBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBackupRequest method. +// req, resp := client.DeleteBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup +func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) { + op := &request.Operation{ + Name: opDeleteBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBackupInput{} + } + + output = &DeleteBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteBackup API operation for Amazon DynamoDB. +// +// Deletes an existing backup of a table. +// +// You can call DeleteBackup at a maximum rate of 10 times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
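+//
+// A sketch of that error-handling pattern, assuming a configured client and
+// a backupARN string obtained elsewhere (for example, from ListBackups):
+//
+// _, err := client.DeleteBackup(&dynamodb.DeleteBackupInput{
+//     BackupArn: aws.String(backupARN),
+// })
+// if aerr, ok := err.(awserr.Error); ok {
+//     switch aerr.Code() {
+//     case dynamodb.ErrCodeBackupNotFoundException:
+//         // no backup exists for the given ARN
+//     default:
+//         fmt.Println(aerr.Code(), aerr.Message())
+//     }
+// }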
+// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteBackup for usage and error information. +// +// Returned Error Types: +// +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup +func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) { + req, out := c.DeleteBackupRequest(input) + return out, req.Send() +} + +// DeleteBackupWithContext is the same as DeleteBackup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) { + req, out := c.DeleteBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteItem = "DeleteItem" + +// DeleteItemRequest generates a "aws/request.Request" representing the +// client's request for the DeleteItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteItem for more information on using the DeleteItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteItemRequest method. 
+// req, resp := client.DeleteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem +func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { + op := &request.Operation{ + Name: opDeleteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteItemInput{} + } + + output = &DeleteItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteItem API operation for Amazon DynamoDB. +// +// Deletes a single item in a table by primary key. You can perform a conditional +// delete operation that deletes the item if it exists, or if it has an expected +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// Unless you specify conditions, the DeleteItem is an idempotent operation; +// running it multiple times on the same item or attribute does not result in +// an error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteItem for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. 
+// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem +func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + return out, req.Send() +} + +// DeleteItemWithContext is the same as DeleteItem with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput, opts ...request.Option) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTable = "DeleteTable" + +// DeleteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTable for more information on using the DeleteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteTableRequest method. +// req, resp := client.DeleteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable +func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { + op := &request.Operation{ + Name: opDeleteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTableInput{} + } + + output = &DeleteTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DeleteTable API operation for Amazon DynamoDB. +// +// The DeleteTable operation deletes a table and all of its items. After a DeleteTable +// request, the specified table is in the DELETING state until DynamoDB completes +// the deletion. 
If the table is in the ACTIVE state, you can delete it. If +// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. +// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. +// If table is already in the DELETING state, no error is returned. +// +// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// DynamoDB might continue to accept data read and write operations, such as +// GetItem and PutItem, on a table in the DELETING state until the table deletion +// is complete. +// +// When you delete a table, any indexes on that table are also deleted. +// +// If you have DynamoDB Streams enabled on the table, then the corresponding +// stream on that table goes into the DISABLED state, and the stream is automatically +// deleted after 24 hours. +// +// Use the DescribeTable action to check the status of the table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable +func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) { + req, out := c.DeleteTableRequest(input) + return out, req.Send() +} + +// DeleteTableWithContext is the same as DeleteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DynamoDB) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) { + req, out := c.DeleteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeBackup = "DescribeBackup" + +// DescribeBackupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBackup for more information on using the DescribeBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeBackupRequest method. +// req, resp := client.DescribeBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup +func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) { + op := &request.Operation{ + Name: opDescribeBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBackupInput{} + } + + output = &DescribeBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeBackup API operation for Amazon DynamoDB. +// +// Describes an existing backup of a table. +// +// You can call DescribeBackup at a maximum rate of 10 times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeBackup for usage and error information. +// +// Returned Error Types: +// +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup +func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) { + req, out := c.DescribeBackupRequest(input) + return out, req.Send() +} + +// DescribeBackupWithContext is the same as DescribeBackup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBackup for details on how to use this API operation. 
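+//
+// A sketch of supplying a context, assuming a configured client; the timeout
+// value is illustrative and backupARN is a placeholder:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// out, err := client.DescribeBackupWithContext(ctx, &dynamodb.DescribeBackupInput{
+//     BackupArn: aws.String(backupARN),
+// })
+// if err == nil {
+//     fmt.Println(out.BackupDescription)
+// }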
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeBackupWithContext(ctx aws.Context, input *DescribeBackupInput, opts ...request.Option) (*DescribeBackupOutput, error) { + req, out := c.DescribeBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeContinuousBackups = "DescribeContinuousBackups" + +// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContinuousBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeContinuousBackups for more information on using the DescribeContinuousBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeContinuousBackupsRequest method. +// req, resp := client.DescribeContinuousBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups +func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) { + op := &request.Operation{ + Name: opDescribeContinuousBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContinuousBackupsInput{} + } + + output = &DescribeContinuousBackupsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeContinuousBackups API operation for Amazon DynamoDB. +// +// Checks the status of continuous backups and point in time recovery on the +// specified table. Continuous backups are ENABLED on all tables at table creation. +// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set +// to ENABLED. +// +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. +// +// You can call DescribeContinuousBackups at a maximum rate of 10 times per +// second. 
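+//
+// A hedged sketch of reading the point in time recovery status (not
+// generated SDK text; table name illustrative, nil checks elided):
+//
+// out, err := client.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{
+//     TableName: aws.String("Music"),
+// })
+// if err == nil {
+//     d := out.ContinuousBackupsDescription
+//     fmt.Println(aws.StringValue(d.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus))
+// }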
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeContinuousBackups for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups +func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) { + req, out := c.DescribeContinuousBackupsRequest(input) + return out, req.Send() +} + +// DescribeContinuousBackupsWithContext is the same as DescribeContinuousBackups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeContinuousBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeContinuousBackupsWithContext(ctx aws.Context, input *DescribeContinuousBackupsInput, opts ...request.Option) (*DescribeContinuousBackupsOutput, error) { + req, out := c.DescribeContinuousBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeContributorInsights = "DescribeContributorInsights" + +// DescribeContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeContributorInsights for more information on using the DescribeContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeContributorInsightsRequest method. +// req, resp := client.DescribeContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights +func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributorInsightsInput) (req *request.Request, output *DescribeContributorInsightsOutput) { + op := &request.Operation{ + Name: opDescribeContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContributorInsightsInput{} + } + + output = &DescribeContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeContributorInsights API operation for Amazon DynamoDB. 
+// +// Returns information about contributor insights for a given table or global +// secondary index. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeContributorInsights for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights +func (c *DynamoDB) DescribeContributorInsights(input *DescribeContributorInsightsInput) (*DescribeContributorInsightsOutput, error) { + req, out := c.DescribeContributorInsightsRequest(input) + return out, req.Send() +} + +// DescribeContributorInsightsWithContext is the same as DescribeContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeContributorInsightsWithContext(ctx aws.Context, input *DescribeContributorInsightsInput, opts ...request.Option) (*DescribeContributorInsightsOutput, error) { + req, out := c.DescribeContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEndpoints = "DescribeEndpoints" + +// DescribeEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEndpoints for more information on using the DescribeEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeEndpointsRequest method. +// req, resp := client.DescribeEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints +func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointsInput{} + } + + output = &DescribeEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEndpoints API operation for Amazon DynamoDB. +// +// Returns the regional endpoint information. 
This action must be included in +// your VPC endpoint policies, or access to the DescribeEndpoints API will be +// denied. For more information on policy permissions, please see Internetwork +// traffic privacy (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeEndpoints for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints +func (c *DynamoDB) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + return out, req.Send() +} + +// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type discovererDescribeEndpoints struct { + Client *DynamoDB + Required bool + EndpointCache *crr.EndpointCache + Params map[string]*string + Key string + req *request.Request +} + +func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) { + input := &DescribeEndpointsInput{} + + resp, err := d.Client.DescribeEndpoints(input) + if err != nil { + return crr.Endpoint{}, err + } + + endpoint := crr.Endpoint{ + Key: d.Key, + } + + for _, e := range resp.Endpoints { + if e.Address == nil { + continue + } + + address := *e.Address + + var scheme string + if idx := strings.Index(address, "://"); idx != -1 { + scheme = address[:idx] + } + + if len(scheme) == 0 { + address = fmt.Sprintf("%s://%s", d.req.HTTPRequest.URL.Scheme, address) + } + + cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes) + u, err := url.Parse(address) + if err != nil { + continue + } + + addr := crr.WeightedAddress{ + URL: u, + Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute), + } + + endpoint.Add(addr) + } + + d.EndpointCache.Add(endpoint) + + return endpoint, nil +} + +func (d *discovererDescribeEndpoints) Handler(r *request.Request) { + endpointKey := crr.BuildEndpointKey(d.Params) + d.Key = endpointKey + d.req = r + + endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required) + if err != nil { + r.Error = err + return + } + + if endpoint.URL != nil && len(endpoint.URL.String()) > 0 { + r.HTTPRequest.URL = endpoint.URL + } +} + +const opDescribeExport = "DescribeExport" + +// DescribeExportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
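+//
+// Note on the discovererDescribeEndpoints handler above: it is only wired in
+// when endpoint discovery is enabled and no custom endpoint is pinned. A
+// hedged opt-in sketch (not generated SDK text; the region is illustrative):
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+//     Region:                  aws.String("us-west-2"),
+//     EnableEndpointDiscovery: aws.Bool(true),
+// }))
+// client := dynamodb.New(sess)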
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExport for more information on using the DescribeExport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeExportRequest method. +// req, resp := client.DescribeExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) { + op := &request.Operation{ + Name: opDescribeExport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportInput{} + } + + output = &DescribeExportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExport API operation for Amazon DynamoDB. +// +// Describes an existing table export. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeExport for usage and error information. +// +// Returned Error Types: +// +// - ExportNotFoundException +// The specified export was not found. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + return out, req.Send() +} + +// DescribeExportWithContext is the same as DescribeExport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeExportWithContext(ctx aws.Context, input *DescribeExportInput, opts ...request.Option) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeGlobalTable = "DescribeGlobalTable" + +// DescribeGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGlobalTable for more information on using the DescribeGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeGlobalTableRequest method. +// req, resp := client.DescribeGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable +func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) { + op := &request.Operation{ + Name: opDescribeGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGlobalTableInput{} + } + + output = &DescribeGlobalTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeGlobalTable API operation for Amazon DynamoDB. +// +// Returns information about the specified global table. +// +// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// when creating new global tables, as it provides greater flexibility, higher +// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To +// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeGlobalTable for usage and error information. 
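+//
+// A hedged usage sketch (not generated SDK text; the global table name is
+// illustrative, nil checks elided):
+//
+// out, err := client.DescribeGlobalTable(&dynamodb.DescribeGlobalTableInput{
+//     GlobalTableName: aws.String("Music"),
+// })
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.GlobalTableDescription.GlobalTableStatus))
+// }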
+// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable +func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) { + req, out := c.DescribeGlobalTableRequest(input) + return out, req.Send() +} + +// DescribeGlobalTableWithContext is the same as DescribeGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeGlobalTableWithContext(ctx aws.Context, input *DescribeGlobalTableInput, opts ...request.Option) (*DescribeGlobalTableOutput, error) { + req, out := c.DescribeGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings" + +// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalTableSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGlobalTableSettings for more information on using the DescribeGlobalTableSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeGlobalTableSettingsRequest method. +// req, resp := client.DescribeGlobalTableSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings +func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) { + op := &request.Operation{ + Name: opDescribeGlobalTableSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGlobalTableSettingsInput{} + } + + output = &DescribeGlobalTableSettingsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
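+	// When discovery applies, the Build handler installed below resolves the
+	// endpoint through the client's endpoint cache; a cache miss triggers a
+	// DescribeEndpoints call whose addresses are cached for CachePeriodInMinutes.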
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeGlobalTableSettings API operation for Amazon DynamoDB. +// +// Describes Region-specific settings for a global table. +// +// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// when creating new global tables, as it provides greater flexibility, higher +// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To +// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeGlobalTableSettings for usage and error information. +// +// Returned Error Types: +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings +func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) { + req, out := c.DescribeGlobalTableSettingsRequest(input) + return out, req.Send() +} + +// DescribeGlobalTableSettingsWithContext is the same as DescribeGlobalTableSettings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGlobalTableSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input *DescribeGlobalTableSettingsInput, opts ...request.Option) (*DescribeGlobalTableSettingsOutput, error) { + req, out := c.DescribeGlobalTableSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImport = "DescribeImport" + +// DescribeImportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
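+//
+// A hedged sketch of walking the replica settings returned by the operation
+// above (not generated SDK text; name illustrative, nil checks elided):
+//
+// out, err := client.DescribeGlobalTableSettings(&dynamodb.DescribeGlobalTableSettingsInput{
+//     GlobalTableName: aws.String("Music"),
+// })
+// if err == nil {
+//     for _, rs := range out.ReplicaSettings {
+//         fmt.Println(aws.StringValue(rs.RegionName), aws.StringValue(rs.ReplicaStatus))
+//     }
+// }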
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImport for more information on using the DescribeImport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeImportRequest method. +// req, resp := client.DescribeImportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport +func (c *DynamoDB) DescribeImportRequest(input *DescribeImportInput) (req *request.Request, output *DescribeImportOutput) { + op := &request.Operation{ + Name: opDescribeImport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImportInput{} + } + + output = &DescribeImportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImport API operation for Amazon DynamoDB. +// +// Represents the properties of the import. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeImport for usage and error information. +// +// Returned Error Types: +// - ImportNotFoundException +// The specified import was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport +func (c *DynamoDB) DescribeImport(input *DescribeImportInput) (*DescribeImportOutput, error) { + req, out := c.DescribeImportRequest(input) + return out, req.Send() +} + +// DescribeImportWithContext is the same as DescribeImport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeImportWithContext(ctx aws.Context, input *DescribeImportInput, opts ...request.Option) (*DescribeImportOutput, error) { + req, out := c.DescribeImportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestination" + +// DescribeKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeKinesisStreamingDestination for more information on using the DescribeKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method. +// req, resp := client.DescribeKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination +func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKinesisStreamingDestinationInput) (req *request.Request, output *DescribeKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opDescribeKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKinesisStreamingDestinationInput{} + } + + output = &DescribeKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// Returns information about the status of Kinesis streaming. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination +func (c *DynamoDB) DescribeKinesisStreamingDestination(input *DescribeKinesisStreamingDestinationInput) (*DescribeKinesisStreamingDestinationOutput, error) { + req, out := c.DescribeKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// DescribeKinesisStreamingDestinationWithContext is the same as DescribeKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeKinesisStreamingDestinationWithContext(ctx aws.Context, input *DescribeKinesisStreamingDestinationInput, opts ...request.Option) (*DescribeKinesisStreamingDestinationOutput, error) { + req, out := c.DescribeKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
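+	// req now carries the caller's context and any per-request options; the
+	// Send below performs the HTTP round trip and fills out on success.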
+ return out, req.Send() +} + +const opDescribeLimits = "DescribeLimits" + +// DescribeLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLimits operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLimits for more information on using the DescribeLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeLimitsRequest method. +// req, resp := client.DescribeLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits +func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { + op := &request.Operation{ + Name: opDescribeLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLimitsInput{} + } + + output = &DescribeLimitsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeLimits API operation for Amazon DynamoDB. +// +// Returns the current provisioned-capacity quotas for your Amazon Web Services +// account in a Region, both for the Region as a whole and for any one DynamoDB +// table that you create there. +// +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum read capacity units and write capacity units that you +// can provision across all of your DynamoDB tables in a given Region. Also, +// there are per-table quotas that apply when you create a table there. For +// more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these quotas by filing a case at Amazon Web Services +// Support Center (https://console.aws.amazon.com/support/home#/), obtaining +// the increase is not instantaneous. The DescribeLimits action lets you write +// code to compare the capacity you are currently using to those quotas imposed +// by your account so that you have enough time to apply for an increase before +// you hit a quota. +// +// For example, you could use one of the Amazon Web Services SDKs to do the +// following: +// +// Call DescribeLimits for a particular Region to obtain your current account +// quotas on provisioned capacity there. 
+// +// Create a variable to hold the aggregate read capacity units provisioned for +// all your tables in that Region, and one to hold the aggregate write capacity +// units. Zero them both. +// +// Call ListTables to obtain a list of all your DynamoDB tables. +// +// For each table name listed by ListTables, do the following: +// +// - Call DescribeTable with the table name. +// +// - Use the data returned by DescribeTable to add the read capacity units +// and write capacity units provisioned for the table itself to your variables. +// +// - If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables +// as well. +// +// Report the account quotas for that Region returned by DescribeLimits, along +// with the total current provisioned capacity levels you have calculated. +// +// This will let you see whether you are getting close to your account-level +// quotas. +// +// The per-table quotas apply only when you are creating a new table. They restrict +// the sum of the provisioned capacity of the new table itself and all its global +// secondary indexes. +// +// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned +// capacity extremely rapidly, but the only quota that applies is that the aggregate +// provisioned capacity over all your tables and GSIs cannot exceed either of +// the per-account quotas. +// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeLimits for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits +func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + return out, req.Send() +} + +// DescribeLimitsWithContext is the same as DescribeLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeLimitsWithContext(ctx aws.Context, input *DescribeLimitsInput, opts ...request.Option) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTable = "DescribeTable" + +// DescribeTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
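+//
+// A hedged sketch of the DescribeLimits bookkeeping described above (not
+// generated SDK text; pagination and error handling are elided, and
+// tableNames would come from ListTables):
+//
+// limits, _ := client.DescribeLimits(&dynamodb.DescribeLimitsInput{})
+// var reads, writes int64
+// for _, name := range tableNames {
+//     t, _ := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
+//     if pt := t.Table.ProvisionedThroughput; pt != nil {
+//         reads += aws.Int64Value(pt.ReadCapacityUnits)
+//         writes += aws.Int64Value(pt.WriteCapacityUnits)
+//     }
+//     for _, gsi := range t.Table.GlobalSecondaryIndexes {
+//         if pt := gsi.ProvisionedThroughput; pt != nil {
+//             reads += aws.Int64Value(pt.ReadCapacityUnits)
+//             writes += aws.Int64Value(pt.WriteCapacityUnits)
+//         }
+//     }
+// }
+// // Compare reads and writes against limits.AccountMaxReadCapacityUnits and
+// // limits.AccountMaxWriteCapacityUnits.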
+// +// See DescribeTable for more information on using the DescribeTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTableRequest method. +// req, resp := client.DescribeTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable +func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { + op := &request.Operation{ + Name: opDescribeTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableInput{} + } + + output = &DescribeTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeTable API operation for Amazon DynamoDB. +// +// Returns information about the table, including the current status of the +// table, when it was created, the primary key schema, and any indexes on the +// table. +// +// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable +// uses an eventually consistent query, and the metadata for your table might +// not be available at that moment. Wait for a few seconds, and then try the +// DescribeTable request again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable +func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + return out, req.Send() +} + +// DescribeTableWithContext is the same as DescribeTable with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTableWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.Option) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTableReplicaAutoScaling = "DescribeTableReplicaAutoScaling" + +// DescribeTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTableReplicaAutoScaling operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTableReplicaAutoScaling for more information on using the DescribeTableReplicaAutoScaling +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTableReplicaAutoScalingRequest method. +// req, resp := client.DescribeTableReplicaAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling +func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableReplicaAutoScalingInput) (req *request.Request, output *DescribeTableReplicaAutoScalingOutput) { + op := &request.Operation{ + Name: opDescribeTableReplicaAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableReplicaAutoScalingInput{} + } + + output = &DescribeTableReplicaAutoScalingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTableReplicaAutoScaling API operation for Amazon DynamoDB. +// +// Describes auto scaling settings across replicas of the global table at once. +// +// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTableReplicaAutoScaling for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. 
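+//
+// Because DescribeTable metadata is eventually consistent right after
+// CreateTable (see above), one hedged way to block until the table is usable
+// is the generated waiter (table name illustrative):
+//
+// err := client.WaitUntilTableExists(&dynamodb.DescribeTableInput{
+//     TableName: aws.String("Music"),
+// })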
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling +func (c *DynamoDB) DescribeTableReplicaAutoScaling(input *DescribeTableReplicaAutoScalingInput) (*DescribeTableReplicaAutoScalingOutput, error) { + req, out := c.DescribeTableReplicaAutoScalingRequest(input) + return out, req.Send() +} + +// DescribeTableReplicaAutoScalingWithContext is the same as DescribeTableReplicaAutoScaling with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTableReplicaAutoScaling for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTableReplicaAutoScalingWithContext(ctx aws.Context, input *DescribeTableReplicaAutoScalingInput, opts ...request.Option) (*DescribeTableReplicaAutoScalingOutput, error) { + req, out := c.DescribeTableReplicaAutoScalingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTimeToLive = "DescribeTimeToLive" + +// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTimeToLive operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTimeToLive for more information on using the DescribeTimeToLive +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTimeToLiveRequest method. +// req, resp := client.DescribeTimeToLiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive +func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) { + op := &request.Operation{ + Name: opDescribeTimeToLive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTimeToLiveInput{} + } + + output = &DescribeTimeToLiveOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeTimeToLive API operation for Amazon DynamoDB. +// +// Gives a description of the Time to Live (TTL) status on the specified table. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTimeToLive for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive +func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) { + req, out := c.DescribeTimeToLiveRequest(input) + return out, req.Send() +} + +// DescribeTimeToLiveWithContext is the same as DescribeTimeToLive with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTimeToLive for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *DescribeTimeToLiveInput, opts ...request.Option) (*DescribeTimeToLiveOutput, error) { + req, out := c.DescribeTimeToLiveRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableKinesisStreamingDestination = "DisableKinesisStreamingDestination" + +// DisableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DisableKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableKinesisStreamingDestination for more information on using the DisableKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableKinesisStreamingDestinationRequest method. +// req, resp := client.DisableKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination +func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKinesisStreamingDestinationInput) (req *request.Request, output *DisableKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opDisableKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableKinesisStreamingDestinationInput{} + } + + output = &DisableKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
+		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+			de := discovererDescribeEndpoints{
+				Required:      false,
+				EndpointCache: c.endpointCache,
+				Params: map[string]*string{
+					"op": aws.String(req.Operation.Name),
+				},
+				Client: c,
+			}
+
+			for k, v := range de.Params {
+				if v == nil {
+					delete(de.Params, k)
+				}
+			}
+
+			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+				Name: "crr.endpointdiscovery",
+				Fn:   de.Handler,
+			})
+		}
+	}
+	return
+}
+
+// DisableKinesisStreamingDestination API operation for Amazon DynamoDB.
+//
+// Stops replication from the DynamoDB table to the Kinesis data stream. This
+// is done without deleting either of the resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DisableKinesisStreamingDestination for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// For most purposes, up to 500 simultaneous table operations are allowed per
+// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
+// RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+// When you are creating a table with one or more secondary indexes, you can
+// have up to 250 such requests running at a time. However, if the table or
+// index specifications are complex, then DynamoDB might temporarily reduce
+// the number of concurrent operations.
+//
+// When importing into DynamoDB, up to 50 simultaneous import table operations
+// are allowed per account.
+//
+// There is a soft account quota of 2,500 tables.
+//
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination
+func (c *DynamoDB) DisableKinesisStreamingDestination(input *DisableKinesisStreamingDestinationInput) (*DisableKinesisStreamingDestinationOutput, error) {
+	req, out := c.DisableKinesisStreamingDestinationRequest(input)
+	return out, req.Send()
+}
+
+// DisableKinesisStreamingDestinationWithContext is the same as DisableKinesisStreamingDestination with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DisableKinesisStreamingDestination for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
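+//
+// A hedged sketch of inspecting the typed errors listed above with awserr
+// (github.com/aws/aws-sdk-go/aws/awserr); not generated SDK text:
+//
+// _, err := client.DisableKinesisStreamingDestination(input)
+// if aerr, ok := err.(awserr.Error); ok {
+//     switch aerr.Code() {
+//     case dynamodb.ErrCodeResourceNotFoundException:
+//         // table or stream destination was not found
+//     case dynamodb.ErrCodeResourceInUseException:
+//         // table is busy; retry later
+//     default:
+//         fmt.Println(aerr.Code(), aerr.Message())
+//     }
+// }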
+func (c *DynamoDB) DisableKinesisStreamingDestinationWithContext(ctx aws.Context, input *DisableKinesisStreamingDestinationInput, opts ...request.Option) (*DisableKinesisStreamingDestinationOutput, error) { + req, out := c.DisableKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableKinesisStreamingDestination = "EnableKinesisStreamingDestination" + +// EnableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the EnableKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableKinesisStreamingDestination for more information on using the EnableKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableKinesisStreamingDestinationRequest method. +// req, resp := client.EnableKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination +func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesisStreamingDestinationInput) (req *request.Request, output *EnableKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opEnableKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableKinesisStreamingDestinationInput{} + } + + output = &EnableKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// EnableKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// Starts table data replication to the specified Kinesis data stream at a timestamp +// chosen during the enable workflow. If this operation doesn't return results +// immediately, use DescribeKinesisStreamingDestination to check if streaming +// to the Kinesis data stream is ACTIVE. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation EnableKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. 
+// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination +func (c *DynamoDB) EnableKinesisStreamingDestination(input *EnableKinesisStreamingDestinationInput) (*EnableKinesisStreamingDestinationOutput, error) { + req, out := c.EnableKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// EnableKinesisStreamingDestinationWithContext is the same as EnableKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See EnableKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) EnableKinesisStreamingDestinationWithContext(ctx aws.Context, input *EnableKinesisStreamingDestinationInput, opts ...request.Option) (*EnableKinesisStreamingDestinationOutput, error) { + req, out := c.EnableKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExecuteStatement = "ExecuteStatement" + +// ExecuteStatementRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteStatement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExecuteStatement for more information on using the ExecuteStatement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ExecuteStatementRequest method. 
+// req, resp := client.ExecuteStatementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement +func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *request.Request, output *ExecuteStatementOutput) { + op := &request.Operation{ + Name: opExecuteStatement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecuteStatementInput{} + } + + output = &ExecuteStatementOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExecuteStatement API operation for Amazon DynamoDB. +// +// This operation allows you to perform reads and singleton writes on data stored +// in DynamoDB, using PartiQL. +// +// For PartiQL reads (SELECT statement), if the total number of processed items +// exceeds the maximum dataset size limit of 1 MB, the read stops and results +// are returned to the user as a LastEvaluatedKey value to continue the read +// in a subsequent operation. If the filter criteria in WHERE clause does not +// match any data, the read will return an empty result set. +// +// A single SELECT statement response can return up to the maximum number of +// items (if using the Limit parameter) or a maximum of 1 MB of data (and then +// apply any filtering to the results using WHERE clause). If LastEvaluatedKey +// is present in the response, you need to paginate the result set. If NextToken +// is present, you need to paginate the result set and include NextToken. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExecuteStatement for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. 
+//
+// - DuplicateItemException
+// There was an attempt to insert an item with the same primary key as an item
+// that already exists in the DynamoDB table.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement
+func (c *DynamoDB) ExecuteStatement(input *ExecuteStatementInput) (*ExecuteStatementOutput, error) {
+ req, out := c.ExecuteStatementRequest(input)
+ return out, req.Send()
+}
+
+// ExecuteStatementWithContext is the same as ExecuteStatement with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ExecuteStatement for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ExecuteStatementWithContext(ctx aws.Context, input *ExecuteStatementInput, opts ...request.Option) (*ExecuteStatementOutput, error) {
+ req, out := c.ExecuteStatementRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opExecuteTransaction = "ExecuteTransaction"
+
+// ExecuteTransactionRequest generates a "aws/request.Request" representing the
+// client's request for the ExecuteTransaction operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ExecuteTransaction for more information on using the ExecuteTransaction
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ExecuteTransactionRequest method.
+// req, resp := client.ExecuteTransactionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction
+func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (req *request.Request, output *ExecuteTransactionOutput) {
+ op := &request.Operation{
+ Name: opExecuteTransaction,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ExecuteTransactionInput{}
+ }
+
+ output = &ExecuteTransactionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ExecuteTransaction API operation for Amazon DynamoDB.
+//
+// This operation allows you to perform transactional reads or writes on data
+// stored in DynamoDB, using PartiQL.
+//
+// The entire transaction must consist of either read statements or write statements;
+// you cannot mix both in one transaction. The EXISTS function is an exception
+// and can be used to check the condition of specific attributes of the item
+// in a similar manner to ConditionCheck in the TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems)
+// API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
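+//
+// As an illustrative sketch only (table, key, and values are hypothetical),
+// a parameterized PartiQL write executed transactionally:
+//
+// out, err := client.ExecuteTransaction(&dynamodb.ExecuteTransactionInput{
+// TransactStatements: []*dynamodb.ParameterizedStatement{{
+// Statement: aws.String("UPDATE \"Music\" SET Rating = ? WHERE Artist = ? AND SongTitle = ?"),
+// Parameters: []*dynamodb.AttributeValue{
+// {N: aws.String("5")},
+// {S: aws.String("Acme Band")},
+// {S: aws.String("Happy Day")},
+// },
+// }},
+// })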
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation ExecuteTransaction for usage and error information.
+//
+// Returned Error Types:
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - TransactionCanceledException
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - A table in the TransactWriteItems request is in a different account
+// or region.
+//
+// - More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case, the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// - A table in the TransactGetItems request is in a different account or
+// region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered to match the requested items; if an item has no error,
+// it will have a None code and a Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors: Code: None Message: null
+//
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table +// as DynamoDB is automatically scaling the table. Throughput exceeds the +// current capacity for one or more global secondary indexes. DynamoDB is +// automatically scaling your index so please try again shortly. This message +// is returned when writes get throttled on an On-Demand GSI as DynamoDB +// is automatically scaling the GSI. +// +// - Validation Error: Code: ValidationError Messages: One or more parameter +// values were invalid. The update expression attempted to update the secondary +// index key beyond allowed size limits. The update expression attempted +// to update the secondary index key to unsupported type. An operand in the +// update expression has an incorrect data type. Item size to update has +// exceeded the maximum allowed size. Number overflow. Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +// +// - TransactionInProgressException +// The transaction with the given request token is already in progress. +// +// Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException. +// These settings help ensure that the client retries will trigger completion +// of the ongoing TransactWriteItems request. +// +// - Set clientExecutionTimeout to a value that allows at least one retry +// to be processed after 5 seconds have elapsed since the first attempt for +// the TransactWriteItems operation. +// +// - Set socketTimeout to a value a little lower than the requestTimeout +// setting. +// +// - requestTimeout should be set based on the time taken for the individual +// retries of a single HTTP request for your use case, but setting it to +// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException +// errors. +// +// - Use exponential backoff when retrying and tune backoff if needed. +// +// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), +// example timeout settings based on the guidelines above are as follows: +// +// Example timeline: +// +// - 0-1000 first attempt +// +// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base +// delay for 4xx errors) +// +// - 1500-2500 second attempt +// +// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// +// - 3500-4500 third attempt +// +// - 4500-6500 third sleep/delay (500 * 2^2) +// +// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds +// have elapsed since the first attempt reached TC) +// +// - IdempotentParameterMismatchException +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. 
For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction +func (c *DynamoDB) ExecuteTransaction(input *ExecuteTransactionInput) (*ExecuteTransactionOutput, error) { + req, out := c.ExecuteTransactionRequest(input) + return out, req.Send() +} + +// ExecuteTransactionWithContext is the same as ExecuteTransaction with the addition of +// the ability to pass a context and additional request options. +// +// See ExecuteTransaction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExecuteTransactionWithContext(ctx aws.Context, input *ExecuteTransactionInput, opts ...request.Option) (*ExecuteTransactionOutput, error) { + req, out := c.ExecuteTransactionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExportTableToPointInTime = "ExportTableToPointInTime" + +// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the ExportTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ExportTableToPointInTimeRequest method. +// req, resp := client.ExportTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opExportTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportTableToPointInTimeInput{} + } + + output = &ExportTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportTableToPointInTime API operation for Amazon DynamoDB. +// +// Exports table data to an S3 bucket. The table must have point in time recovery +// enabled, and you can export data from any time within the point in time recovery +// window. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExportTableToPointInTime for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InvalidExportTimeException +// The specified ExportTime is outside of the point in time recovery window. +// +// - ExportConflictException +// There was a conflict when writing to the specified S3 bucket. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + return out, req.Send() +} + +// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of +// the ability to pass a context and additional request options. +// +// See ExportTableToPointInTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetItem = "GetItem" + +// GetItemRequest generates a "aws/request.Request" representing the +// client's request for the GetItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetItem for more information on using the GetItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Examples include custom headers or retry logic.
+//
+// // Example sending a request using the GetItemRequest method.
+// req, resp := client.GetItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
+func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) {
+ op := &request.Operation{
+ Name: opGetItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetItemInput{}
+ }
+
+ output = &GetItemOutput{}
+ req = c.newRequest(op, input, output)
+ // if a custom endpoint for the request is set to a non-empty string,
+ // we skip the endpoint discovery workflow.
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ }
+ return
+}
+
+// GetItem API operation for Amazon DynamoDB.
+//
+// The GetItem operation returns a set of attributes for the item with the given
+// primary key. If there is no matching item, GetItem does not return any data
+// and there will be no Item element in the response.
+//
+// GetItem provides an eventually consistent read by default. If your application
+// requires a strongly consistent read, set ConsistentRead to true. Although
+// a strongly consistent read might take more time than an eventually consistent
+// read, it always returns the last updated value.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation GetItem for usage and error information.
+//
+// Returned Error Types:
+//
+// - ProvisionedThroughputExceededException
+// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
+// automatically retry requests that receive this exception. Your request is
+// eventually successful, unless your retry queue is too large to finish. Reduce
+// the frequency of requests and use exponential backoff. For more information,
+// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - RequestLimitExceeded
+// Throughput exceeds the current throughput quota for your account. Please
+// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
+// a quota increase.
+//
+// - InternalServerError
+// An error occurred on the server side.
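+//
+// As an illustrative sketch only (table name and key values are hypothetical):
+//
+// out, err := client.GetItem(&dynamodb.GetItemInput{
+// TableName: aws.String("Music"),
+// Key: map[string]*dynamodb.AttributeValue{
+// "Artist": {S: aws.String("Acme Band")},
+// "SongTitle": {S: aws.String("Happy Day")},
+// },
+// ConsistentRead: aws.Bool(true),
+// })
+// if err == nil && out.Item == nil {
+// // No matching item: the response carries no Item element.
+// }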
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + return out, req.Send() +} + +// GetItemWithContext is the same as GetItem with the addition of +// the ability to pass a context and additional request options. +// +// See GetItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportTable = "ImportTable" + +// ImportTableRequest generates a "aws/request.Request" representing the +// client's request for the ImportTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportTable for more information on using the ImportTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ImportTableRequest method. +// req, resp := client.ImportTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable +func (c *DynamoDB) ImportTableRequest(input *ImportTableInput) (req *request.Request, output *ImportTableOutput) { + op := &request.Operation{ + Name: opImportTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportTableInput{} + } + + output = &ImportTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportTable API operation for Amazon DynamoDB. +// +// Imports table data from an S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ImportTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. 
However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ImportConflictException +// There was a conflict when importing from the specified S3 source. This can +// occur when the current import conflicts with a previous import request that +// had the same client token. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable +func (c *DynamoDB) ImportTable(input *ImportTableInput) (*ImportTableOutput, error) { + req, out := c.ImportTableRequest(input) + return out, req.Send() +} + +// ImportTableWithContext is the same as ImportTable with the addition of +// the ability to pass a context and additional request options. +// +// See ImportTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ImportTableWithContext(ctx aws.Context, input *ImportTableInput, opts ...request.Option) (*ImportTableOutput, error) { + req, out := c.ImportTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBackups = "ListBackups" + +// ListBackupsRequest generates a "aws/request.Request" representing the +// client's request for the ListBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBackups for more information on using the ListBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBackupsRequest method. +// req, resp := client.ListBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { + op := &request.Operation{ + Name: opListBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBackupsInput{} + } + + output = &ListBackupsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
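+ // Discovery is registered as optional below (Required: false), so if the
+ // endpoint lookup fails the request still proceeds against the configured
+ // regional endpoint.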
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListBackups API operation for Amazon DynamoDB. +// +// List backups associated with an Amazon Web Services account. To list backups +// for a given table, specify TableName. ListBackups returns a paginated list +// of results with at most 1 MB worth of items in a page. You can also specify +// a maximum number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. Note +// that these boundaries are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListBackups for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) + return out, req.Send() +} + +// ListBackupsWithContext is the same as ListBackups with the addition of +// the ability to pass a context and additional request options. +// +// See ListBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListContributorInsights = "ListContributorInsights" + +// ListContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the ListContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListContributorInsights for more information on using the ListContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListContributorInsightsRequest method. 
+// req, resp := client.ListContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights +func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) { + op := &request.Operation{ + Name: opListContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContributorInsightsInput{} + } + + output = &ListContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListContributorInsights API operation for Amazon DynamoDB. +// +// Returns a list of ContributorInsightsSummary for a table and all its global +// secondary indexes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListContributorInsights for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights +func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) { + req, out := c.ListContributorInsightsRequest(input) + return out, req.Send() +} + +// ListContributorInsightsWithContext is the same as ListContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See ListContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *ListContributorInsightsInput, opts ...request.Option) (*ListContributorInsightsOutput, error) { + req, out := c.ListContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListContributorInsightsPages iterates over the pages of a ListContributorInsights operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListContributorInsights method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListContributorInsights operation. 
+// pageNum := 0 +// err := client.ListContributorInsightsPages(params, +// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error { + return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListContributorInsightsPagesWithContext same as ListContributorInsightsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListContributorInsightsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListContributorInsightsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListContributorInsightsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListExports = "ListExports" + +// ListExportsRequest generates a "aws/request.Request" representing the +// client's request for the ListExports operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListExports for more information on using the ListExports +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListExportsRequest method. +// req, resp := client.ListExportsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { + op := &request.Operation{ + Name: opListExports, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListExportsInput{} + } + + output = &ListExportsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListExports API operation for Amazon DynamoDB. +// +// Lists completed exports within the past 90 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListExports for usage and error information. 
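+//
+// As an illustrative sketch only (the table ARN is hypothetical), pages can
+// also be walked manually with NextToken:
+//
+// in := &dynamodb.ListExportsInput{
+// TableArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"),
+// MaxResults: aws.Int64(25),
+// }
+// for {
+// out, err := client.ListExports(in)
+// if err != nil || out.NextToken == nil {
+// break
+// }
+// in.NextToken = out.NextToken
+// }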
+// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) + return out, req.Send() +} + +// ListExportsWithContext is the same as ListExports with the addition of +// the ability to pass a context and additional request options. +// +// See ListExports for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListExportsPages iterates over the pages of a ListExports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListExports method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListExports operation. +// pageNum := 0 +// err := client.ListExportsPages(params, +// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { + return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListExportsPagesWithContext same as ListExportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListExportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListExportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
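+ // Each page request inherits the caller's context and any per-request
+ // options supplied to the pagination call.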
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGlobalTables = "ListGlobalTables" + +// ListGlobalTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListGlobalTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGlobalTables for more information on using the ListGlobalTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { + op := &request.Operation{ + Name: opListGlobalTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListGlobalTablesInput{} + } + + output = &ListGlobalTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListGlobalTables API operation for Amazon DynamoDB. +// +// Lists all global tables that have a replica in the specified Region. +// +// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// when creating new global tables, as it provides greater flexibility, higher +// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To +// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
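+//
+// As an illustrative sketch only (the Region name is hypothetical):
+//
+// out, err := client.ListGlobalTables(&dynamodb.ListGlobalTablesInput{
+// RegionName: aws.String("us-east-1"),
+// Limit: aws.Int64(10),
+// })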
+// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListGlobalTables for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + return out, req.Send() +} + +// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListGlobalTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListImports = "ListImports" + +// ListImportsRequest generates a "aws/request.Request" representing the +// client's request for the ListImports operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListImports for more information on using the ListImports +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListImportsRequest method. +// req, resp := client.ListImportsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports +func (c *DynamoDB) ListImportsRequest(input *ListImportsInput) (req *request.Request, output *ListImportsOutput) { + op := &request.Operation{ + Name: opListImports, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListImportsInput{} + } + + output = &ListImportsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListImports API operation for Amazon DynamoDB. +// +// Lists completed imports within the past 90 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListImports for usage and error information. +// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports +func (c *DynamoDB) ListImports(input *ListImportsInput) (*ListImportsOutput, error) { + req, out := c.ListImportsRequest(input) + return out, req.Send() +} + +// ListImportsWithContext is the same as ListImports with the addition of +// the ability to pass a context and additional request options. +// +// See ListImports for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListImportsWithContext(ctx aws.Context, input *ListImportsInput, opts ...request.Option) (*ListImportsOutput, error) { + req, out := c.ListImportsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListImportsPages iterates over the pages of a ListImports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListImports method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListImports operation. +// pageNum := 0 +// err := client.ListImportsPages(params, +// func(page *dynamodb.ListImportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListImportsPages(input *ListImportsInput, fn func(*ListImportsOutput, bool) bool) error { + return c.ListImportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListImportsPagesWithContext same as ListImportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListImportsPagesWithContext(ctx aws.Context, input *ListImportsInput, fn func(*ListImportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListImportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListImportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation.
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTables for more information on using the ListTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + output = &ListTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListTables API operation for Amazon DynamoDB. +// +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTables for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + return out, req.Send() +} + +// ListTablesWithContext is the same as ListTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
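+//
+//	// Example (sketch): bounding ListTables with a deadline. Assumes client
+//	// is an initialized *DynamoDB; aws.Context is satisfied by a standard
+//	// context.Context, so context.WithTimeout can be used directly.
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	out, err := client.ListTablesWithContext(ctx, &dynamodb.ListTablesInput{})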
+func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTablesPages iterates over the pages of a ListTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTables operation. +// pageNum := 0 +// err := client.ListTablesPages(params, +// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { + return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTablesPagesWithContext same as ListTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsOfResource = "ListTagsOfResource" + +// ListTagsOfResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsOfResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsOfResource for more information on using the ListTagsOfResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTagsOfResourceRequest method. 
+// req, resp := client.ListTagsOfResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) { + op := &request.Operation{ + Name: opListTagsOfResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsOfResourceInput{} + } + + output = &ListTagsOfResourceOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListTagsOfResource API operation for Amazon DynamoDB. +// +// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource +// up to 10 times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTagsOfResource for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) + return out, req.Send() +} + +// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsOfResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutItem = "PutItem" + +// PutItemRequest generates a "aws/request.Request" representing the +// client's request for the PutItem operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutItem for more information on using the PutItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutItemRequest method. +// req, resp := client.PutItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { + op := &request.Operation{ + Name: opPutItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutItemInput{} + } + + output = &PutItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// PutItem API operation for Amazon DynamoDB. +// +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. You can return the item's attribute values in the same operation, +// using the ReturnValues parameter. +// +// When you add an item, the primary key attributes are the only required attributes. +// +// Empty String and Binary attribute values are allowed. Attribute values of +// type String and Binary must have a length greater than zero if the attribute +// is used as a key attribute for a table or index. Set type attributes cannot +// be empty. +// +// Invalid requests with empty values will be rejected with a ValidationException. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name +// of the attribute being used as the partition key for the table. Since every +// record must contain that attribute, the attribute_not_exists function will +// only succeed if no matching item exists. +// +// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation PutItem for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + return out, req.Send() +} + +// PutItemWithContext is the same as PutItem with the addition of +// the ability to pass a context and additional request options. +// +// See PutItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opQuery = "Query" + +// QueryRequest generates a "aws/request.Request" representing the +// client's request for the Query operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Query for more information on using the Query +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the QueryRequest method. 
+// req, resp := client.QueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { + op := &request.Operation{ + Name: opQuery, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &QueryInput{} + } + + output = &QueryOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// Query API operation for Amazon DynamoDB. +// +// You must provide the name of the partition key attribute and a single value +// for that attribute. Query returns all items with that partition key value. +// Optionally, you can provide a sort key attribute and use a comparison operator +// to refine the search results. +// +// Use the KeyConditionExpression parameter to provide a specific value for +// the partition key. The Query operation will return all of the items from +// the table or index with that partition key value. You can optionally narrow +// the scope of the Query operation by specifying a sort key value and a comparison +// operator in KeyConditionExpression. To further refine the Query results, +// you can optionally provide a FilterExpression. A FilterExpression determines +// which items within the results should be returned to you. All of the other +// results are discarded. +// +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume +// the minimum number of read capacity units for that type of read operation. +// +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a FilterExpression. +// +// Query results are always sorted by the sort key value. If the data type of +// the sort key is Number, the results are returned in numeric order; otherwise, +// the results are returned in order of UTF-8 bytes. By default, the sort order +// is ascending. To reverse the order, set the ScanIndexForward parameter to +// false. +// +// A single Query operation will read up to the maximum number of items set +// (if using the Limit parameter) or a maximum of 1 MB of data and then apply +// any filtering to the results using FilterExpression. 
If LastEvaluatedKey +// is present in the response, you will need to paginate the result set. For +// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) +// in the Amazon DynamoDB Developer Guide. +// +// FilterExpression is applied after a Query finishes, but before the results +// are returned. A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression. +// +// A Query operation can return an empty result set and a LastEvaluatedKey if +// all the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the ConsistentRead +// parameter to true and obtain a strongly consistent result. Global secondary +// indexes support eventually consistent reads only, so do not specify ConsistentRead +// when querying a global secondary index. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation Query for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + return out, req.Send() +} + +// QueryWithContext is the same as Query with the addition of +// the ability to pass a context and additional request options. +// +// See Query for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// QueryPages iterates over the pages of a Query operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See Query method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Query operation. +// pageNum := 0 +// err := client.QueryPages(params, +// func(page *dynamodb.QueryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { + return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// QueryPagesWithContext same as QueryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *QueryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.QueryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opRestoreTableFromBackup = "RestoreTableFromBackup" + +// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableFromBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RestoreTableFromBackupRequest method. +// req, resp := client.RestoreTableFromBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) { + op := &request.Operation{ + Name: opRestoreTableFromBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreTableFromBackupInput{} + } + + output = &RestoreTableFromBackupOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
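+ // When discovery runs, discovererDescribeEndpoints resolves the operation's
+ // endpoint via the DescribeEndpoints API and serves it from c.endpointCache;
+ // the handler is pushed to the front of the Build phase so the discovered
+ // endpoint is in place before the request is signed and sent.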
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// RestoreTableFromBackup API operation for Amazon DynamoDB. +// +// Creates a new table from an existing backup. Any number of users can execute +// up to 4 concurrent restores (any type of restore) in a given account. +// +// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. +// +// You must manually set up the following on the restored table: +// +// - Auto scaling policies +// +// - IAM policies +// +// - Amazon CloudWatch metrics and alarms +// +// - Tags +// +// - Stream settings +// +// - Time to Live (TTL) settings +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation RestoreTableFromBackup for usage and error information. +// +// Returned Error Types: +// +// - TableAlreadyExistsException +// A target table with the specified name already exists. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. +// +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) + return out, req.Send() +} + +// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreTableFromBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests.
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreTableToPointInTime = "RestoreTableToPointInTime" + +// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RestoreTableToPointInTimeRequest method. +// req, resp := client.RestoreTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreTableToPointInTimeInput{} + } + + output = &RestoreTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// RestoreTableToPointInTime API operation for Amazon DynamoDB. +// +// Restores the specified table to the specified point in time within EarliestRestorableDateTime +// and LatestRestorableDateTime. You can restore your table to any point in +// time during the last 35 days. Any number of users can execute up to 4 concurrent +// restores (any type of restore) in a given account. +// +// When you restore using point in time recovery, DynamoDB restores your table +// data to the state based on the selected date and time (day:hour:minute:second) +// to a new table. +// +// Along with data, the following are also included on the new restored table +// using point in time recovery: +// +// - Global secondary indexes (GSIs) +// +// - Local secondary indexes (LSIs) +// +// - Provisioned read and write capacity +// +// - Encryption settings All these settings come from the current settings +// of the source table at the time of restore. 
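+//
+//	// Example (sketch, table names illustrative): restoring to the latest
+//	// restorable time. Assumes client is an initialized *DynamoDB.
+//	out, err := client.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
+//	    SourceTableName:         aws.String("Music"),
+//	    TargetTableName:         aws.String("MusicRestored"),
+//	    UseLatestRestorableTime: aws.Bool(true),
+//	})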
+// +// You must manually set up the following on the restored table: +// +// - Auto scaling policies +// +// - IAM policies +// +// - Amazon CloudWatch metrics and alarms +// +// - Tags +// +// - Stream settings +// +// - Time to Live (TTL) settings +// +// - Point in time recovery settings +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation RestoreTableToPointInTime for usage and error information. +// +// Returned Error Types: +// +// - TableAlreadyExistsException +// A target table with the specified name already exists. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InvalidRestoreTimeException +// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime +// and LatestRestorableDateTime. +// +// - PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) { + req, out := c.RestoreTableToPointInTimeRequest(input) + return out, req.Send() +} + +// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreTableToPointInTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) { + req, out := c.RestoreTableToPointInTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opScan = "Scan" + +// ScanRequest generates a "aws/request.Request" representing the +// client's request for the Scan operation.
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Scan for more information on using the Scan +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ScanRequest method. +// req, resp := client.ScanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan +func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { + op := &request.Operation{ + Name: opScan, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ScanInput{} + } + + output = &ScanOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// Scan API operation for Amazon DynamoDB. +// +// The Scan operation returns one or more items and item attributes by accessing +// every item in a table or a secondary index. To have DynamoDB return fewer +// items, you can provide a FilterExpression operation. +// +// If the total number of scanned items exceeds the maximum dataset size limit +// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey +// value to continue the scan in a subsequent operation. The results also include +// the number of items exceeding the limit. A scan can result in no table data +// meeting the filter criteria. +// +// A single Scan operation reads up to the maximum number of items set (if using +// the Limit parameter) or a maximum of 1 MB of data and then applies any filtering +// to the results using FilterExpression. If LastEvaluatedKey is present in +// the response, you need to paginate the result set. For more information, +// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) +// in the Amazon DynamoDB Developer Guide. +// +// Scan operations proceed sequentially; however, for faster performance on +// a large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) +// in the Amazon DynamoDB Developer Guide.
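+//
+//	// Example (sketch, segment count and table name illustrative): a parallel
+//	// Scan, one goroutine per segment. handlePage stands in for a callback
+//	// with the ScanPages callback signature.
+//	total := int64(4)
+//	for seg := int64(0); seg < total; seg++ {
+//	    go client.ScanPages(&dynamodb.ScanInput{
+//	        TableName:     aws.String("Music"),
+//	        Segment:       aws.Int64(seg),
+//	        TotalSegments: aws.Int64(total),
+//	    }, handlePage)
+//	}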
+// +// Scan uses eventually consistent reads when accessing the data in a table; +// therefore, the result set might not include the changes to data in the table +// immediately before the operation began. If you need a consistent copy of +// the data, as of the time that the Scan begins, you can set the ConsistentRead +// parameter to true. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation Scan for usage and error information. +// +// Returned Error Types: +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan +func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + return out, req.Send() +} + +// ScanWithContext is the same as Scan with the addition of +// the ability to pass a context and additional request options. +// +// See Scan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ScanPages iterates over the pages of a Scan operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Scan method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *dynamodb.ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { + return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ScanPagesWithContext same as ScanPages except +// it takes a Context and allows setting request options on the pages. 
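+//
+//	// Example (sketch): collecting items across pages under a deadline.
+//	// Assumes client is an initialized *DynamoDB; the table name is illustrative.
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//	defer cancel()
+//	var items []map[string]*dynamodb.AttributeValue
+//	err := client.ScanPagesWithContext(ctx,
+//	    &dynamodb.ScanInput{TableName: aws.String("Music")},
+//	    func(page *dynamodb.ScanOutput, lastPage bool) bool {
+//	        items = append(items, page.Items...)
+//	        return true
+//	    })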
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ScanInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ScanRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// TagResource API operation for Amazon DynamoDB. +// +// Associate a set of tags with an Amazon DynamoDB resource. You can then activate +// these user-defined tags so that they appear on the Billing and Cost Management +// console for cost allocation tracking. You can call TagResource up to five +// times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTransactGetItems = "TransactGetItems" + +// TransactGetItemsRequest generates a "aws/request.Request" representing the +// client's request for the TransactGetItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TransactGetItems for more information on using the TransactGetItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TransactGetItemsRequest method.
+// req, resp := client.TransactGetItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems +func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) { + op := &request.Operation{ + Name: opTransactGetItems, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TransactGetItemsInput{} + } + + output = &TransactGetItemsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// TransactGetItems API operation for Amazon DynamoDB. +// +// TransactGetItems is a synchronous operation that atomically retrieves multiple +// items from one or more tables (but not from indexes) in a single account +// and Region. A TransactGetItems call can contain up to 100 TransactGetItem +// objects, each of which contains a Get structure that specifies an item to +// retrieve from a table in the account and Region. A call to TransactGetItems +// cannot retrieve items from tables in more than one Amazon Web Services account +// or Region. The aggregate size of the items in the transaction cannot exceed +// 4 MB. +// +// DynamoDB rejects the entire TransactGetItems request if any of the following +// is true: +// +// - A conflicting operation is in the process of updating an item to be +// read. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - There is a user error, such as an invalid data format. +// +// - The aggregate size of the items in the transaction exceeded 4 MB. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation TransactGetItems for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - TransactionCanceledException +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// +// - A condition in one of the condition expressions is not met. +// +// - A table in the TransactWriteItems request is in a different account +// or region. +// +// - More than one action in the TransactWriteItems operation targets the +// same item. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. 
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// - A table in the TransactGetItems request is in a different account or
+// region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of requested items; if an item has no error,
+// it will have a None code and a Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors: Code: None Message: null
+//
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow.
Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems +func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) { + req, out := c.TransactGetItemsRequest(input) + return out, req.Send() +} + +// TransactGetItemsWithContext is the same as TransactGetItems with the addition of +// the ability to pass a context and additional request options. +// +// See TransactGetItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) { + req, out := c.TransactGetItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTransactWriteItems = "TransactWriteItems" + +// TransactWriteItemsRequest generates a "aws/request.Request" representing the +// client's request for the TransactWriteItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TransactWriteItems for more information on using the TransactWriteItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TransactWriteItemsRequest method. 
+// req, resp := client.TransactWriteItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems +func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) { + op := &request.Operation{ + Name: opTransactWriteItems, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TransactWriteItemsInput{} + } + + output = &TransactWriteItemsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// TransactWriteItems API operation for Amazon DynamoDB. +// +// TransactWriteItems is a synchronous write operation that groups up to 100 +// action requests. These actions can target items in different tables, but +// not in different Amazon Web Services accounts or Regions, and no two actions +// can target the same item. For example, you cannot both ConditionCheck and +// Update the same item. The aggregate size of the items in the transaction +// cannot exceed 4 MB. +// +// The actions are completed atomically so that either all of them succeed, +// or all of them fail. They are defined by the following objects: +// +// - Put — Initiates a PutItem operation to write a new item. This structure +// specifies the primary key of the item to be written, the name of the table +// to write it in, an optional condition expression that must be satisfied +// for the write to succeed, a list of the item's attributes, and a field +// indicating whether to retrieve the item's attributes if the condition +// is not met. +// +// - Update — Initiates an UpdateItem operation to update an existing item. +// This structure specifies the primary key of the item to be updated, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the update to succeed, an expression that defines +// one or more attributes to be updated, and a field indicating whether to +// retrieve the item's attributes if the condition is not met. +// +// - Delete — Initiates a DeleteItem operation to delete an existing item. +// This structure specifies the primary key of the item to be deleted, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the deletion to succeed, and a field indicating +// whether to retrieve the item's attributes if the condition is not met. +// +// - ConditionCheck — Applies a condition to an item that is not being +// modified by the transaction. This structure specifies the primary key +// of the item to be checked, the name of the table where it resides, a condition +// expression that must be satisfied for the transaction to succeed, and +// a field indicating whether to retrieve the item's attributes if the condition +// is not met. 
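+//
+// As an illustration, here is a minimal sketch of assembling such a request
+// with one Put and one ConditionCheck. The table names, key attributes, and
+// condition expressions below are hypothetical placeholders, not values
+// prescribed by this API:
+//
+//	input := &dynamodb.TransactWriteItemsInput{
+//		TransactItems: []*dynamodb.TransactWriteItem{
+//			{Put: &dynamodb.Put{
+//				TableName:           aws.String("Orders"), // hypothetical table
+//				Item:                map[string]*dynamodb.AttributeValue{"OrderId": {S: aws.String("order-123")}},
+//				ConditionExpression: aws.String("attribute_not_exists(OrderId)"),
+//			}},
+//			{ConditionCheck: &dynamodb.ConditionCheck{
+//				TableName:           aws.String("Customers"), // hypothetical table
+//				Key:                 map[string]*dynamodb.AttributeValue{"CustomerId": {S: aws.String("cust-1")}},
+//				ConditionExpression: aws.String("attribute_exists(CustomerId)"),
+//			}},
+//		},
+//	}
+//	out, err := svc.TransactWriteItems(input) // svc is an existing *dynamodb.DynamoDB client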
+//
+// DynamoDB rejects the entire TransactWriteItems request if any of the following
+// is true:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - An ongoing operation is in the process of updating the same item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (bigger than 400 KB), a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// - The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// - There is a user error, such as an invalid data format.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TransactWriteItems for usage and error information.
+//
+// Returned Error Types:
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - TransactionCanceledException
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - A table in the TransactWriteItems request is in a different account
+// or region.
+//
+// - More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// - A table in the TransactGetItems request is in a different account or
+// region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of requested items; if an item has no error,
+// it will have a None code and a Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors: Code: None Message: null
+//
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded.
+// Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// - TransactionInProgressException
+// The transaction with the given request token is already in progress.
+//
+// Recommended Settings
+//
+// This is a general recommendation for handling the TransactionInProgressException.
+// These settings help ensure that the client retries will trigger completion
+// of the ongoing TransactWriteItems request.
+//
+// - Set clientExecutionTimeout to a value that allows at least one retry
+// to be processed after 5 seconds have elapsed since the first attempt for
+// the TransactWriteItems operation.
+//
+// - Set socketTimeout to a value a little lower than the requestTimeout
+// setting.
+//
+// - requestTimeout should be set based on the time taken for the individual
+// retries of a single HTTP request for your use case, but setting it to
+// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
+// errors.
+//
+// - Use exponential backoff when retrying and tune backoff if needed.
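+//
+// The clientExecutionTimeout, socketTimeout, and requestTimeout names above
+// come from the AWS SDK for Java; this SDK has no identically named options.
+// A rough, unofficial sketch of the same intent in Go is to cap each HTTP
+// attempt, bound the overall call with a context, and tune MaxRetries (all
+// values below are illustrative assumptions):
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		MaxRetries: aws.Int(3),                             // leaves room for a retry after the 5 s mark
+//		HTTPClient: &http.Client{Timeout: 2 * time.Second}, // per-attempt cap, similar in spirit to requestTimeout
+//	}))
+//	svc := dynamodb.New(sess)
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) // overall bound
+//	defer cancel()
+//	out, err := svc.TransactWriteItemsWithContext(ctx, input)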
+// +// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), +// example timeout settings based on the guidelines above are as follows: +// +// Example timeline: +// +// - 0-1000 first attempt +// +// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base +// delay for 4xx errors) +// +// - 1500-2500 second attempt +// +// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// +// - 3500-4500 third attempt +// +// - 4500-6500 third sleep/delay (500 * 2^2) +// +// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds +// have elapsed since the first attempt reached TC) +// +// - IdempotentParameterMismatchException +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems +func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) + return out, req.Send() +} + +// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of +// the ability to pass a context and additional request options. +// +// See TransactWriteItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. 
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the UntagResourceRequest method.
+// req, resp := client.UntagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
+func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
+	op := &request.Operation{
+		Name:       opUntagResource,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UntagResourceInput{}
+	}
+
+	output = &UntagResourceOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	// if custom endpoint for the request is set to a non empty string,
+	// we skip the endpoint discovery workflow.
+	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
+		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+			de := discovererDescribeEndpoints{
+				Required:      false,
+				EndpointCache: c.endpointCache,
+				Params: map[string]*string{
+					"op": aws.String(req.Operation.Name),
+				},
+				Client: c,
+			}
+
+			for k, v := range de.Params {
+				if v == nil {
+					delete(de.Params, k)
+				}
+			}
+
+			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+				Name: "crr.endpointdiscovery",
+				Fn:   de.Handler,
+			})
+		}
+	}
+	return
+}
+
+// UntagResource API operation for Amazon DynamoDB.
+//
+// Removes the association of tags from an Amazon DynamoDB resource. You can
+// call UntagResource up to five times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UntagResource for usage and error information.
+//
+// Returned Error Types:
+//
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// For most purposes, up to 500 simultaneous table operations are allowed per
+// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
+// RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+// When you are creating a table with one or more secondary indexes, you can
+// have up to 250 such requests running at a time. However, if the table or
+// index specifications are complex, then DynamoDB might temporarily reduce
+// the number of concurrent operations.
+//
+// When importing into DynamoDB, up to 50 simultaneous import table operations
+// are allowed per account.
+//
+// There is a soft account quota of 2,500 tables.
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - ResourceInUseException
+// The operation conflicts with the resource's availability.
For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContinuousBackups = "UpdateContinuousBackups" + +// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContinuousBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateContinuousBackupsRequest method. +// req, resp := client.UpdateContinuousBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) { + op := &request.Operation{ + Name: opUpdateContinuousBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContinuousBackupsInput{} + } + + output = &UpdateContinuousBackupsOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateContinuousBackups API operation for Amazon DynamoDB. +// +// UpdateContinuousBackups enables or disables point in time recovery for the +// specified table. 
A successful UpdateContinuousBackups call returns the current +// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables +// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus +// will be set to ENABLED. +// +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateContinuousBackups for usage and error information. +// +// Returned Error Types: +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - ContinuousBackupsUnavailableException +// Backups have not yet been enabled for this table. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) + return out, req.Send() +} + +// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateContinuousBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContributorInsights = "UpdateContributorInsights" + +// UpdateContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContributorInsights for more information on using the UpdateContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateContributorInsightsRequest method. 
+// req, resp := client.UpdateContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights +func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) { + op := &request.Operation{ + Name: opUpdateContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContributorInsightsInput{} + } + + output = &UpdateContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateContributorInsights API operation for Amazon DynamoDB. +// +// Updates the status for contributor insights for a specific table or index. +// CloudWatch Contributor Insights for DynamoDB graphs display the partition +// key and (if applicable) sort key of frequently accessed items and frequently +// throttled items in plaintext. If you require the use of Amazon Web Services +// Key Management Service (KMS) to encrypt this table’s partition key and +// sort key data with an Amazon Web Services managed key or customer managed +// key, you should not enable CloudWatch Contributor Insights for DynamoDB for +// this table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateContributorInsights for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights +func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) { + req, out := c.UpdateContributorInsightsRequest(input) + return out, req.Send() +} + +// UpdateContributorInsightsWithContext is the same as UpdateContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateContributorInsightsWithContext(ctx aws.Context, input *UpdateContributorInsightsInput, opts ...request.Option) (*UpdateContributorInsightsOutput, error) { + req, out := c.UpdateContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGlobalTable = "UpdateGlobalTable" + +// UpdateGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGlobalTable for more information on using the UpdateGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateGlobalTableRequest method. +// req, resp := client.UpdateGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { + op := &request.Operation{ + Name: opUpdateGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGlobalTableInput{} + } + + output = &UpdateGlobalTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateGlobalTable API operation for Amazon DynamoDB. +// +// Adds or removes replicas in the specified global table. The global table +// must already exist to be able to use this operation. Any replica to be added +// must be empty, have the same name as the global table, have the same key +// schema, have DynamoDB Streams enabled, and have the same provisioned and +// maximum write capacity units. +// +// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// when creating new global tables, as it provides greater flexibility, higher +// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To +// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). +// To update existing global tables from version 2017.11.29 (Legacy) to version +// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). +// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// you can use DescribeTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) +// instead. 
+// +// Although you can use UpdateGlobalTable to add replicas and remove replicas +// in a single request, for simplicity we recommend that you issue separate +// requests for adding or removing replicas. +// +// If global secondary indexes are specified, then the following conditions +// must also be met: +// +// - The global secondary indexes must have the same name. +// +// - The global secondary indexes must have the same hash key and sort key +// (if present). +// +// - The global secondary indexes must have the same provisioned and maximum +// write capacity units. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateGlobalTable for usage and error information. +// +// Returned Error Types: +// +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// - ReplicaAlreadyExistsException +// The specified replica is already part of the global table. +// +// - ReplicaNotFoundException +// The specified replica is no longer part of the global table. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) + return out, req.Send() +} + +// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" + +// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTableSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateGlobalTableSettingsRequest method. 
+// req, resp := client.UpdateGlobalTableSettingsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
+func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) {
+	op := &request.Operation{
+		Name:       opUpdateGlobalTableSettings,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateGlobalTableSettingsInput{}
+	}
+
+	output = &UpdateGlobalTableSettingsOutput{}
+	req = c.newRequest(op, input, output)
+	// if custom endpoint for the request is set to a non empty string,
+	// we skip the endpoint discovery workflow.
+	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
+		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+			de := discovererDescribeEndpoints{
+				Required:      false,
+				EndpointCache: c.endpointCache,
+				Params: map[string]*string{
+					"op": aws.String(req.Operation.Name),
+				},
+				Client: c,
+			}
+
+			for k, v := range de.Params {
+				if v == nil {
+					delete(de.Params, k)
+				}
+			}
+
+			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+				Name: "crr.endpointdiscovery",
+				Fn:   de.Handler,
+			})
+		}
+	}
+	return
+}
+
+// UpdateGlobalTableSettings API operation for Amazon DynamoDB.
+//
+// Updates settings for a global table.
+//
+// This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html)
+// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html)
+// when creating new global tables, as it provides greater flexibility, higher
+// efficiency and consumes less write capacity than 2017.11.29 (Legacy). To
+// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
+// To update existing global tables from version 2017.11.29 (Legacy) to version
+// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateGlobalTableSettings for usage and error information.
+//
+// Returned Error Types:
+//
+// - GlobalTableNotFoundException
+// The specified global table does not exist.
+//
+// - ReplicaNotFoundException
+// The specified replica is no longer part of the global table.
+//
+// - IndexNotFoundException
+// The operation tried to access a nonexistent index.
+//
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// For most purposes, up to 500 simultaneous table operations are allowed per
+// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
+// RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+// When you are creating a table with one or more secondary indexes, you can
+// have up to 250 such requests running at a time.
However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) + return out, req.Send() +} + +// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGlobalTableSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateItem = "UpdateItem" + +// UpdateItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateItem for more information on using the UpdateItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateItemRequest method. +// req, resp := client.UpdateItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { + op := &request.Operation{ + Name: opUpdateItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateItemInput{} + } + + output = &UpdateItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateItem API operation for Amazon DynamoDB. +// +// Edits an existing item's attributes, or adds a new item to the table if it +// does not already exist. You can put, delete, or add attribute values. You +// can also perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair +// if it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem operation +// using the ReturnValues parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateItem for usage and error information. +// +// Returned Error Types: +// +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + return out, req.Send() +} + +// UpdateItemWithContext is the same as UpdateItem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTable for more information on using the UpdateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + output = &UpdateTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateTable API operation for Amazon DynamoDB. +// +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// You can only perform one of the following operations at once: +// +// - Modify the provisioned throughput settings of the table. +// +// - Remove a global secondary index from the table. +// +// - Create a new global secondary index on the table. After the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it is executing, the table +// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// issue another UpdateTable request. When the table returns to the ACTIVE state, +// the UpdateTable operation is complete. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
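+//
+// A minimal error-handling sketch using that type assertion (the variable
+// names and the chosen error case are illustrative):
+//
+//	out, err := svc.UpdateTable(input)
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok {
+//			switch aerr.Code() {
+//			case dynamodb.ErrCodeResourceInUseException:
+//				// the table is transitioning between states; back off and retry
+//			default:
+//				log.Printf("UpdateTable failed: %s: %s", aerr.Code(), aerr.Message())
+//			}
+//		}
+//	}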
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateTable for usage and error information.
+//
+// Returned Error Types:
+//
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// For most purposes, up to 500 simultaneous table operations are allowed per
+// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
+// RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+// When you are creating a table with one or more secondary indexes, you can
+// have up to 250 such requests running at a time. However, if the table or
+// index specifications are complex, then DynamoDB might temporarily reduce
+// the number of concurrent operations.
+//
+// When importing into DynamoDB, up to 50 simultaneous import table operations
+// are allowed per account.
+//
+// There is a soft account quota of 2,500 tables.
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
+func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) {
+	req, out := c.UpdateTableRequest(input)
+	return out, req.Send()
+}
+
+// UpdateTableWithContext is the same as UpdateTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) {
+	req, out := c.UpdateTableRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling"
+
+// UpdateTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateTableReplicaAutoScaling operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateTableReplicaAutoScaling for more information on using the UpdateTableReplicaAutoScaling
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method.
+// req, resp := client.UpdateTableReplicaAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling +func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) { + op := &request.Operation{ + Name: opUpdateTableReplicaAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableReplicaAutoScalingInput{} + } + + output = &UpdateTableReplicaAutoScalingOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTableReplicaAutoScaling API operation for Amazon DynamoDB. +// +// Updates auto scaling settings on your global tables at once. +// +// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTableReplicaAutoScaling for usage and error information. +// +// Returned Error Types: +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, +// RestoreTableFromBackup, and RestoreTableToPointInTime. +// +// When you are creating a table with one or more secondary indexes, you can +// have up to 250 such requests running at a time. However, if the table or +// index specifications are complex, then DynamoDB might temporarily reduce +// the number of concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations +// are allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling +func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) { + req, out := c.UpdateTableReplicaAutoScalingRequest(input) + return out, req.Send() +} + +// UpdateTableReplicaAutoScalingWithContext is the same as UpdateTableReplicaAutoScaling with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTableReplicaAutoScaling for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateTableReplicaAutoScalingWithContext(ctx aws.Context, input *UpdateTableReplicaAutoScalingInput, opts ...request.Option) (*UpdateTableReplicaAutoScalingOutput, error) {
+	req, out := c.UpdateTableReplicaAutoScalingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateTimeToLive = "UpdateTimeToLive"
+
+// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateTimeToLive operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateTimeToLive for more information on using the UpdateTimeToLive
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the UpdateTimeToLiveRequest method.
+//	req, resp := client.UpdateTimeToLiveRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
+func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) {
+	op := &request.Operation{
+		Name:       opUpdateTimeToLive,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateTimeToLiveInput{}
+	}
+
+	output = &UpdateTimeToLiveOutput{}
+	req = c.newRequest(op, input, output)
+	// if a custom endpoint for the request is set to a non-empty string,
+	// we skip the endpoint discovery workflow.
+	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
+		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+			de := discovererDescribeEndpoints{
+				Required:      false,
+				EndpointCache: c.endpointCache,
+				Params: map[string]*string{
+					"op": aws.String(req.Operation.Name),
+				},
+				Client: c,
+			}
+
+			for k, v := range de.Params {
+				if v == nil {
+					delete(de.Params, k)
+				}
+			}
+
+			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+				Name: "crr.endpointdiscovery",
+				Fn:   de.Handler,
+			})
+		}
+	}
+	return
+}
+
+// UpdateTimeToLive API operation for Amazon DynamoDB.
+//
+// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the
+// specified table. A successful UpdateTimeToLive call returns the current
+// TimeToLiveSpecification. It can take up to one hour for the change to fully
+// process. Any additional UpdateTimeToLive calls for the same table during
+// this one-hour duration result in a ValidationException.
+//
+// TTL compares the current time in epoch time format to the time stored in
+// the TTL attribute of an item. If the epoch time value stored in the attribute
+// is less than the current time, the item is marked as expired and subsequently
+// deleted.
+//
+// The epoch time format is the number of seconds elapsed since 12:00:00 AM
+// January 1, 1970 UTC.
+//
+// DynamoDB deletes expired items on a best-effort basis to ensure availability
+// of throughput for other data operations.
+//
+// DynamoDB typically deletes expired items within two days of expiration. The
+// exact duration within which an item gets deleted after expiration is specific
+// to the nature of the workload. Items that have expired and not been deleted
+// will still show up in reads, queries, and scans.
+//
+// As items are deleted, they are removed from any local secondary index and
+// global secondary index immediately in the same eventually consistent way
+// as a standard delete operation.
+//
+// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateTimeToLive for usage and error information.
+//
+// Returned Error Types:
+//
+//   - ResourceInUseException
+//     The operation conflicts with the resource's availability. For example, you
+//     attempted to recreate an existing table, or tried to delete a table currently
+//     in the CREATING state.
+//
+//   - ResourceNotFoundException
+//     The operation tried to access a nonexistent table or index. The resource
+//     might not be specified correctly, or its status might not be ACTIVE.
+//
+//   - LimitExceededException
+//     There is no limit to the number of daily on-demand backups that can be taken.
+//
+//     For most purposes, up to 500 simultaneous table operations are allowed per
+//     account. These operations include CreateTable, UpdateTable, DeleteTable,
+//     UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+//     When you are creating a table with one or more secondary indexes, you can
+//     have up to 250 such requests running at a time. However, if the table or
+//     index specifications are complex, then DynamoDB might temporarily reduce
+//     the number of concurrent operations.
+//
+//     When importing into DynamoDB, up to 50 simultaneous import table operations
+//     are allowed per account.
+//
+//     There is a soft account quota of 2,500 tables.
+//
+//   - InternalServerError
+//     An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
+func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) {
+	req, out := c.UpdateTimeToLiveRequest(input)
+	return out, req.Send()
+}
+
+// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateTimeToLive for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) {
+	req, out := c.UpdateTimeToLiveRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Contains details of a table archival operation.
+type ArchivalSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the backup the table was archived to, when
+	// applicable in the archival reason.
If you wish to restore this backup to + // the same table name, you will need to delete the original table. + ArchivalBackupArn *string `min:"37" type:"string"` + + // The date and time when table archival was initiated by DynamoDB, in UNIX + // epoch time format. + ArchivalDateTime *time.Time `type:"timestamp"` + + // The reason DynamoDB archived the table. Currently, the only possible value + // is: + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to + // the table's KMS key being inaccessible for more than seven days. An On-Demand + // backup was created at the archival time. + ArchivalReason *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ArchivalSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ArchivalSummary) GoString() string { + return s.String() +} + +// SetArchivalBackupArn sets the ArchivalBackupArn field's value. +func (s *ArchivalSummary) SetArchivalBackupArn(v string) *ArchivalSummary { + s.ArchivalBackupArn = &v + return s +} + +// SetArchivalDateTime sets the ArchivalDateTime field's value. +func (s *ArchivalSummary) SetArchivalDateTime(v time.Time) *ArchivalSummary { + s.ArchivalDateTime = &v + return s +} + +// SetArchivalReason sets the ArchivalReason field's value. +func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary { + s.ArchivalReason = &v + return s +} + +// Represents an attribute for describing the key schema for the table and indexes. +type AttributeDefinition struct { + _ struct{} `type:"structure"` + + // A name for the attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // The data type for the attribute, where: + // + // * S - the attribute is of type String + // + // * N - the attribute is of type Number + // + // * B - the attribute is of type Binary + // + // AttributeType is a required field + AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
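+//
+// Illustrative sketch (not part of the generated documentation): Validate
+// reports missing or too-short required fields through request.ErrInvalidParams,
+// so callers can check inputs client-side before sending a request. The value
+// below is a hypothetical example.
+//
+//	def := &dynamodb.AttributeDefinition{AttributeType: aws.String("S")}
+//	if err := def.Validate(); err != nil {
+//	    fmt.Println(err) // reports that AttributeName is a required field
+//	}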
+func (s *AttributeDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.AttributeType == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AttributeDefinition) SetAttributeName(v string) *AttributeDefinition { + s.AttributeName = &v + return s +} + +// SetAttributeType sets the AttributeType field's value. +func (s *AttributeDefinition) SetAttributeType(v string) *AttributeDefinition { + s.AttributeType = &v + return s +} + +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) +// in the Amazon DynamoDB Developer Guide. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // An attribute of type Binary. For example: + // + // "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" + // B is automatically base64 encoded/decoded by the SDK. + B []byte `type:"blob"` + + // An attribute of type Boolean. For example: + // + // "BOOL": true + BOOL *bool `type:"boolean"` + + // An attribute of type Binary Set. For example: + // + // "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] + BS [][]byte `type:"list"` + + // An attribute of type List. For example: + // + // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] + L []*AttributeValue `type:"list"` + + // An attribute of type Map. For example: + // + // "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} + M map[string]*AttributeValue `type:"map"` + + // An attribute of type Number. For example: + // + // "N": "123.45" + // + // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + // across languages and libraries. However, DynamoDB treats them as number type + // attributes for mathematical operations. + N *string `type:"string"` + + // An attribute of type Number Set. For example: + // + // "NS": ["42.2", "-19", "7.5", "3.14"] + // + // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + // across languages and libraries. However, DynamoDB treats them as number type + // attributes for mathematical operations. + NS []*string `type:"list"` + + // An attribute of type Null. For example: + // + // "NULL": true + NULL *bool `type:"boolean"` + + // An attribute of type String. For example: + // + // "S": "Hello" + S *string `type:"string"` + + // An attribute of type String Set. For example: + // + // "SS": ["Giraffe", "Hippo" ,"Zebra"] + SS []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValue) GoString() string { + return s.String() +} + +// SetB sets the B field's value. +func (s *AttributeValue) SetB(v []byte) *AttributeValue { + s.B = v + return s +} + +// SetBOOL sets the BOOL field's value. +func (s *AttributeValue) SetBOOL(v bool) *AttributeValue { + s.BOOL = &v + return s +} + +// SetBS sets the BS field's value. +func (s *AttributeValue) SetBS(v [][]byte) *AttributeValue { + s.BS = v + return s +} + +// SetL sets the L field's value. +func (s *AttributeValue) SetL(v []*AttributeValue) *AttributeValue { + s.L = v + return s +} + +// SetM sets the M field's value. +func (s *AttributeValue) SetM(v map[string]*AttributeValue) *AttributeValue { + s.M = v + return s +} + +// SetN sets the N field's value. +func (s *AttributeValue) SetN(v string) *AttributeValue { + s.N = &v + return s +} + +// SetNS sets the NS field's value. +func (s *AttributeValue) SetNS(v []*string) *AttributeValue { + s.NS = v + return s +} + +// SetNULL sets the NULL field's value. +func (s *AttributeValue) SetNULL(v bool) *AttributeValue { + s.NULL = &v + return s +} + +// SetS sets the S field's value. +func (s *AttributeValue) SetS(v string) *AttributeValue { + s.S = &v + return s +} + +// SetSS sets the SS field's value. +func (s *AttributeValue) SetSS(v []*string) *AttributeValue { + s.SS = v + return s +} + +// For the UpdateItem operation, represents the attributes to be modified, the +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, +// you will need to delete the item, and then use PutItem to create a new item +// with new attributes. +// +// Attribute values cannot be null; string and binary type attributes must have +// lengths greater than zero; and set type attributes must not be empty. Requests +// with empty values will be rejected with a ValidationException exception. +type AttributeValueUpdate struct { + _ struct{} `type:"structure"` + + // Specifies how to perform the update. Valid values are PUT (default), DELETE, + // and ADD. The behavior depends on whether the specified primary key already + // exists in the table. + // + // If an item with the specified Key is found in the table: + // + // * PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // * DELETE - If no value is specified, the attribute and its value are removed + // from the item. The data type of the specified value must match the existing + // value's data type. If a set of values is specified, then those values + // are subtracted from the old set. For example, if the attribute value was + // the set [a,b,c] and the DELETE action specified [a,c], then the final + // attribute value would be [b]. Specifying an empty set is an error. + // + // * ADD - If the attribute does not already exist, then the attribute and + // its values are added to the item. If the attribute does exist, then the + // behavior of ADD depends on the data type of the attribute: If the existing + // attribute is a number, and if Value is also a number, then the Value is + // mathematically added to the existing attribute. If Value is a negative + // number, then it is subtracted from the existing attribute. 
If you use + // ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. In addition, + // if you use ADD to update an existing item, and intend to increment or + // decrement an attribute value which does not yet exist, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // does not yet have an attribute named itemcount, but you decide to ADD + // the number 3 to this attribute anyway, even though it currently does not + // exist. DynamoDB will create the itemcount attribute, set its initial value + // to 0, and finally add 3 to it. The result will be a new itemcount attribute + // in the item, with a value of 3. If the existing data type is a set, and + // if the Value is also a set, then the Value is added to the existing set. + // (This is a set operation, not mathematical addition.) For example, if + // the attribute value was the set [1,2], and the ADD action specified [3], + // then the final attribute value would be [1,2,3]. An error occurs if an + // Add action is specified for a set attribute and the attribute type specified + // does not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The same holds true for number + // sets and binary sets. This action is only valid for an existing attribute + // whose data type is number or is a set. Do not use ADD for any other data + // types. + // + // If no item with the specified Key is found: + // + // * PUT - DynamoDB creates a new item with the specified primary key, and + // then adds the attribute. + // + // * DELETE - Nothing happens; there is no attribute to delete. + // + // * ADD - DynamoDB creates a new item with the supplied primary key and + // number (or set) for the attribute value. The only data types allowed are + // number, number set, string set or binary set. + Action *string `type:"string" enum:"AttributeAction"` + + // Represents the data for an attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValueUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AttributeValueUpdate) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *AttributeValueUpdate) SetAction(v string) *AttributeValueUpdate { + s.Action = &v + return s +} + +// SetValue sets the Value field's value. 
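+//
+// Illustrative sketch (not part of the generated documentation): an ADD update
+// that increments a numeric attribute by 3, built with the fluent setters and
+// suitable for use in an UpdateItemInput.AttributeUpdates map. The increment
+// value is a hypothetical example; numbers are passed as strings.
+//
+//	upd := (&dynamodb.AttributeValueUpdate{}).
+//	    SetAction(dynamodb.AttributeActionAdd).
+//	    SetValue((&dynamodb.AttributeValue{}).SetN("3"))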
+func (s *AttributeValueUpdate) SetValue(v *AttributeValue) *AttributeValueUpdate { + s.Value = v + return s +} + +// Represents the properties of the scaling policy. +type AutoScalingPolicyDescription struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // Represents a target tracking scaling policy configuration. + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyDescription) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AutoScalingPolicyDescription) SetPolicyName(v string) *AutoScalingPolicyDescription { + s.PolicyName = &v + return s +} + +// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. +func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) *AutoScalingPolicyDescription { + s.TargetTrackingScalingPolicyConfiguration = v + return s +} + +// Represents the auto scaling policy to be modified. +type AutoScalingPolicyUpdate struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // Represents a target tracking scaling policy configuration. + // + // TargetTrackingScalingPolicyConfiguration is a required field + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingPolicyUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AutoScalingPolicyUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"} + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.TargetTrackingScalingPolicyConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration")) + } + if s.TargetTrackingScalingPolicyConfiguration != nil { + if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil { + invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AutoScalingPolicyUpdate) SetPolicyName(v string) *AutoScalingPolicyUpdate { + s.PolicyName = &v + return s +} + +// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. +func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) *AutoScalingPolicyUpdate { + s.TargetTrackingScalingPolicyConfiguration = v + return s +} + +// Represents the auto scaling settings for a global table or global secondary +// index. +type AutoScalingSettingsDescription struct { + _ struct{} `type:"structure"` + + // Disabled auto scaling for this global table or global secondary index. + AutoScalingDisabled *bool `type:"boolean"` + + // Role ARN used for configuring the auto scaling policy. + AutoScalingRoleArn *string `type:"string"` + + // The maximum capacity units that a global table or global secondary index + // should be scaled up to. + MaximumUnits *int64 `min:"1" type:"long"` + + // The minimum capacity units that a global table or global secondary index + // should be scaled down to. + MinimumUnits *int64 `min:"1" type:"long"` + + // Information about the scaling policies. + ScalingPolicies []*AutoScalingPolicyDescription `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsDescription) GoString() string { + return s.String() +} + +// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. +func (s *AutoScalingSettingsDescription) SetAutoScalingDisabled(v bool) *AutoScalingSettingsDescription { + s.AutoScalingDisabled = &v + return s +} + +// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. +func (s *AutoScalingSettingsDescription) SetAutoScalingRoleArn(v string) *AutoScalingSettingsDescription { + s.AutoScalingRoleArn = &v + return s +} + +// SetMaximumUnits sets the MaximumUnits field's value. +func (s *AutoScalingSettingsDescription) SetMaximumUnits(v int64) *AutoScalingSettingsDescription { + s.MaximumUnits = &v + return s +} + +// SetMinimumUnits sets the MinimumUnits field's value. 
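+//
+// Illustrative sketch (not part of the generated documentation): reading the
+// scaling bounds from a description returned by the service, where desc is a
+// hypothetical *AutoScalingSettingsDescription.
+//
+//	fmt.Printf("min=%d max=%d disabled=%t\n",
+//	    aws.Int64Value(desc.MinimumUnits),
+//	    aws.Int64Value(desc.MaximumUnits),
+//	    aws.BoolValue(desc.AutoScalingDisabled))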
+func (s *AutoScalingSettingsDescription) SetMinimumUnits(v int64) *AutoScalingSettingsDescription { + s.MinimumUnits = &v + return s +} + +// SetScalingPolicies sets the ScalingPolicies field's value. +func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPolicyDescription) *AutoScalingSettingsDescription { + s.ScalingPolicies = v + return s +} + +// Represents the auto scaling settings to be modified for a global table or +// global secondary index. +type AutoScalingSettingsUpdate struct { + _ struct{} `type:"structure"` + + // Disabled auto scaling for this global table or global secondary index. + AutoScalingDisabled *bool `type:"boolean"` + + // Role ARN used for configuring auto scaling policy. + AutoScalingRoleArn *string `min:"1" type:"string"` + + // The maximum capacity units that a global table or global secondary index + // should be scaled up to. + MaximumUnits *int64 `min:"1" type:"long"` + + // The minimum capacity units that a global table or global secondary index + // should be scaled down to. + MinimumUnits *int64 `min:"1" type:"long"` + + // The scaling policy to apply for scaling target global table or global secondary + // index capacity units. + ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"} + if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingRoleArn", 1)) + } + if s.MaximumUnits != nil && *s.MaximumUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumUnits", 1)) + } + if s.MinimumUnits != nil && *s.MinimumUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinimumUnits", 1)) + } + if s.ScalingPolicyUpdate != nil { + if err := s.ScalingPolicyUpdate.Validate(); err != nil { + invalidParams.AddNested("ScalingPolicyUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. +func (s *AutoScalingSettingsUpdate) SetAutoScalingDisabled(v bool) *AutoScalingSettingsUpdate { + s.AutoScalingDisabled = &v + return s +} + +// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. +func (s *AutoScalingSettingsUpdate) SetAutoScalingRoleArn(v string) *AutoScalingSettingsUpdate { + s.AutoScalingRoleArn = &v + return s +} + +// SetMaximumUnits sets the MaximumUnits field's value. +func (s *AutoScalingSettingsUpdate) SetMaximumUnits(v int64) *AutoScalingSettingsUpdate { + s.MaximumUnits = &v + return s +} + +// SetMinimumUnits sets the MinimumUnits field's value. 
+func (s *AutoScalingSettingsUpdate) SetMinimumUnits(v int64) *AutoScalingSettingsUpdate { + s.MinimumUnits = &v + return s +} + +// SetScalingPolicyUpdate sets the ScalingPolicyUpdate field's value. +func (s *AutoScalingSettingsUpdate) SetScalingPolicyUpdate(v *AutoScalingPolicyUpdate) *AutoScalingSettingsUpdate { + s.ScalingPolicyUpdate = v + return s +} + +// Represents the properties of a target tracking scaling policy. +type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove capacity from the scalable resource. Otherwise, scale in is enabled + // and the target tracking policy can remove capacity from the scalable resource. + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the cooldown period after a scale-in, application + // auto scaling scales out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string { + return s.String() +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.DisableScaleIn = &v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. 
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.TargetValue = &v + return s +} + +// Represents the settings of a target tracking scaling policy that will be +// modified. +type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct { + _ struct{} `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove capacity from the scalable resource. Otherwise, scale in is enabled + // and the target tracking policy can remove capacity from the scalable resource. + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the cooldown period after a scale-in, application + // auto scaling scales out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"} + if s.TargetValue == nil { + invalidParams.Add(request.NewErrParamRequired("TargetValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. 
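+//
+// Illustrative sketch (not part of the generated documentation): a settings
+// update that tracks a target utilization of 70 with hypothetical capacity
+// bounds; all values below are examples only.
+//
+//	settings := &dynamodb.AutoScalingSettingsUpdate{
+//	    MinimumUnits: aws.Int64(5),
+//	    MaximumUnits: aws.Int64(100),
+//	    ScalingPolicyUpdate: &dynamodb.AutoScalingPolicyUpdate{
+//	        TargetTrackingScalingPolicyConfiguration: &dynamodb.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{
+//	            TargetValue: aws.Float64(70.0),
+//	        },
+//	    },
+//	}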
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.DisableScaleIn = &v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.TargetValue = &v + return s +} + +// Contains the description of the backup created for the table. +type BackupDescription struct { + _ struct{} `type:"structure"` + + // Contains the details of the backup created for the table. + BackupDetails *BackupDetails `type:"structure"` + + // Contains the details of the table when the backup was created. + SourceTableDetails *SourceTableDetails `type:"structure"` + + // Contains the details of the features enabled on the table when the backup + // was created. For example, LSIs, GSIs, streams, TTL. + SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDescription) GoString() string { + return s.String() +} + +// SetBackupDetails sets the BackupDetails field's value. +func (s *BackupDescription) SetBackupDetails(v *BackupDetails) *BackupDescription { + s.BackupDetails = v + return s +} + +// SetSourceTableDetails sets the SourceTableDetails field's value. +func (s *BackupDescription) SetSourceTableDetails(v *SourceTableDetails) *BackupDescription { + s.SourceTableDetails = v + return s +} + +// SetSourceTableFeatureDetails sets the SourceTableFeatureDetails field's value. +func (s *BackupDescription) SetSourceTableFeatureDetails(v *SourceTableFeatureDetails) *BackupDescription { + s.SourceTableFeatureDetails = v + return s +} + +// Contains the details of the backup created for the table. +type BackupDetails struct { + _ struct{} `type:"structure"` + + // ARN associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` + + // Time at which the backup was created. This is the request time of the backup. + // + // BackupCreationDateTime is a required field + BackupCreationDateTime *time.Time `type:"timestamp" required:"true"` + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. 
+ BackupExpiryDateTime *time.Time `type:"timestamp"` + + // Name of the requested backup. + // + // BackupName is a required field + BackupName *string `min:"3" type:"string" required:"true"` + + // Size of the backup in bytes. DynamoDB updates this value approximately every + // six hours. Recent changes might not be reflected in this value. + BackupSizeBytes *int64 `type:"long"` + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + // + // BackupStatus is a required field + BackupStatus *string `type:"string" required:"true" enum:"BackupStatus"` + + // BackupType: + // + // * USER - You create and manage these using the on-demand backup feature. + // + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at + // no additional cost). System backups allow you to restore the deleted table + // to the state it was in just before the point of deletion. + // + // * AWS_BACKUP - On-demand backup created by you from Backup service. + // + // BackupType is a required field + BackupType *string `type:"string" required:"true" enum:"BackupType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupDetails) GoString() string { + return s.String() +} + +// SetBackupArn sets the BackupArn field's value. +func (s *BackupDetails) SetBackupArn(v string) *BackupDetails { + s.BackupArn = &v + return s +} + +// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. +func (s *BackupDetails) SetBackupCreationDateTime(v time.Time) *BackupDetails { + s.BackupCreationDateTime = &v + return s +} + +// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. +func (s *BackupDetails) SetBackupExpiryDateTime(v time.Time) *BackupDetails { + s.BackupExpiryDateTime = &v + return s +} + +// SetBackupName sets the BackupName field's value. +func (s *BackupDetails) SetBackupName(v string) *BackupDetails { + s.BackupName = &v + return s +} + +// SetBackupSizeBytes sets the BackupSizeBytes field's value. +func (s *BackupDetails) SetBackupSizeBytes(v int64) *BackupDetails { + s.BackupSizeBytes = &v + return s +} + +// SetBackupStatus sets the BackupStatus field's value. +func (s *BackupDetails) SetBackupStatus(v string) *BackupDetails { + s.BackupStatus = &v + return s +} + +// SetBackupType sets the BackupType field's value. +func (s *BackupDetails) SetBackupType(v string) *BackupDetails { + s.BackupType = &v + return s +} + +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +type BackupInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BackupInUseException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BackupInUseException) GoString() string {
+	return s.String()
+}
+
+func newErrorBackupInUseException(v protocol.ResponseMetadata) error {
+	return &BackupInUseException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *BackupInUseException) Code() string {
+	return "BackupInUseException"
+}
+
+// Message returns the exception's message.
+func (s *BackupInUseException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *BackupInUseException) OrigErr() error {
+	return nil
+}
+
+func (s *BackupInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *BackupInUseException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *BackupInUseException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Backup not found for the given BackupARN.
+type BackupNotFoundException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BackupNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BackupNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorBackupNotFoundException(v protocol.ResponseMetadata) error {
+	return &BackupNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *BackupNotFoundException) Code() string {
+	return "BackupNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *BackupNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *BackupNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *BackupNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *BackupNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *BackupNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Contains details for the backup.
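+//
+// Illustrative sketch (not part of the generated documentation): listing backup
+// summaries for a hypothetical table with ListBackups; error handling is elided
+// and names are examples only.
+//
+//	out, err := client.ListBackups(&dynamodb.ListBackupsInput{
+//	    TableName: aws.String("Music"),
+//	})
+//	if err == nil {
+//	    for _, b := range out.BackupSummaries {
+//	        fmt.Println(aws.StringValue(b.BackupName), aws.StringValue(b.BackupStatus))
+//	    }
+//	}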
+type BackupSummary struct { + _ struct{} `type:"structure"` + + // ARN associated with the backup. + BackupArn *string `min:"37" type:"string"` + + // Time at which the backup was created. + BackupCreationDateTime *time.Time `type:"timestamp"` + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time `type:"timestamp"` + + // Name of the specified backup. + BackupName *string `min:"3" type:"string"` + + // Size of the backup in bytes. + BackupSizeBytes *int64 `type:"long"` + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + BackupStatus *string `type:"string" enum:"BackupStatus"` + + // BackupType: + // + // * USER - You create and manage these using the on-demand backup feature. + // + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at + // no additional cost). System backups allow you to restore the deleted table + // to the state it was in just before the point of deletion. + // + // * AWS_BACKUP - On-demand backup created by you from Backup service. + BackupType *string `type:"string" enum:"BackupType"` + + // ARN associated with the table. + TableArn *string `type:"string"` + + // Unique identifier for the table. + TableId *string `type:"string"` + + // Name of the table. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BackupSummary) GoString() string { + return s.String() +} + +// SetBackupArn sets the BackupArn field's value. +func (s *BackupSummary) SetBackupArn(v string) *BackupSummary { + s.BackupArn = &v + return s +} + +// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. +func (s *BackupSummary) SetBackupCreationDateTime(v time.Time) *BackupSummary { + s.BackupCreationDateTime = &v + return s +} + +// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. +func (s *BackupSummary) SetBackupExpiryDateTime(v time.Time) *BackupSummary { + s.BackupExpiryDateTime = &v + return s +} + +// SetBackupName sets the BackupName field's value. +func (s *BackupSummary) SetBackupName(v string) *BackupSummary { + s.BackupName = &v + return s +} + +// SetBackupSizeBytes sets the BackupSizeBytes field's value. +func (s *BackupSummary) SetBackupSizeBytes(v int64) *BackupSummary { + s.BackupSizeBytes = &v + return s +} + +// SetBackupStatus sets the BackupStatus field's value. +func (s *BackupSummary) SetBackupStatus(v string) *BackupSummary { + s.BackupStatus = &v + return s +} + +// SetBackupType sets the BackupType field's value. +func (s *BackupSummary) SetBackupType(v string) *BackupSummary { + s.BackupType = &v + return s +} + +// SetTableArn sets the TableArn field's value. 
+func (s *BackupSummary) SetTableArn(v string) *BackupSummary { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *BackupSummary) SetTableId(v string) *BackupSummary { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BackupSummary) SetTableName(v string) *BackupSummary { + s.TableName = &v + return s +} + +type BatchExecuteStatementInput struct { + _ struct{} `type:"structure"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The list of PartiQL statements representing the batch to run. + // + // Statements is a required field + Statements []*BatchStatementRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchExecuteStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchExecuteStatementInput"} + if s.Statements == nil { + invalidParams.Add(request.NewErrParamRequired("Statements")) + } + if s.Statements != nil && len(s.Statements) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statements", 1)) + } + if s.Statements != nil { + for i, v := range s.Statements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Statements", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *BatchExecuteStatementInput) SetReturnConsumedCapacity(v string) *BatchExecuteStatementInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetStatements sets the Statements field's value. +func (s *BatchExecuteStatementInput) SetStatements(v []*BatchStatementRequest) *BatchExecuteStatementInput { + s.Statements = v + return s +} + +type BatchExecuteStatementOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire operation. The values of the list + // are ordered according to the ordering of the statements. 
+ ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // The response to each PartiQL statement in the batch. + Responses []*BatchStatementResponse `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchExecuteStatementOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchExecuteStatementOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchExecuteStatementOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *BatchExecuteStatementOutput) SetResponses(v []*BatchStatementResponse) *BatchExecuteStatementOutput { + s.Responses = v + return s +} + +// Represents the input of a BatchGetItem operation. +type BatchGetItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a map that describes + // one or more items to retrieve from that table. Each table name can be used + // only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // + // * ConsistentRead - If true, a strongly consistent read is used; if false + // (the default), an eventually consistent read is used. + // + // * ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. The following are some use + // cases for using ExpressionAttributeNames: To access an attribute whose + // name conflicts with a DynamoDB reserved word. To create a placeholder + // for repeating occurrences of an attribute name in an expression. To prevent + // special characters in an attribute name from being misinterpreted in an + // expression. Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: Percentile The + // name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could + // specify the following for ExpressionAttributeNames: {"#P":"Percentile"} + // You could then use this substitution in an expression, as in this example: + // #P = :val Tokens that begin with the : character are expression attribute + // values, which are placeholders for the actual value at runtime. For more + // information about expression attribute names, see Accessing Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. For each primary key, you must provide all of the + // key attributes. 
For example, with a simple primary key, you only need + // to provide the partition key value. For a composite key, you must provide + // both the partition key value and the sort key value. + // + // * ProjectionExpression - A string that identifies one or more attributes + // to retrieve from the table. These attributes can include scalars, sets, + // or elements of a JSON document. The attributes in the expression must + // be separated by commas. If no attribute names are specified, then all + // attributes are returned. If any of the requested attributes are not found, + // they do not appear in the result. For more information, see Accessing + // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression + // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + // + // RequestItems is a required field + RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"} + if s.RequestItems == nil { + invalidParams.Add(request.NewErrParamRequired("RequestItems")) + } + if s.RequestItems != nil && len(s.RequestItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) + } + if s.RequestItems != nil { + for i, v := range s.RequestItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestItems sets the RequestItems field's value. 
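+//
+// An illustrative request map (table and key values are placeholders;
+// KeysAndAttributes and AttributeValue are defined elsewhere in this package):
+//
+//	input := (&BatchGetItemInput{}).SetRequestItems(map[string]*KeysAndAttributes{
+//		"Music": {
+//			Keys: []map[string]*AttributeValue{
+//				{"Artist": {S: aws.String("No One You Know")}},
+//			},
+//		},
+//	})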
+func (s *BatchGetItemInput) SetRequestItems(v map[string]*KeysAndAttributes) *BatchGetItemInput { + s.RequestItems = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *BatchGetItemInput) SetReturnConsumedCapacity(v string) *BatchGetItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// Represents the output of a BatchGetItem operation. +type BatchGetItemOutput struct { + _ struct{} `type:"structure"` + + // The read capacity units consumed by the entire BatchGetItem operation. + // + // Each element consists of: + // + // * TableName - The table that consumed the provisioned throughput. + // + // * CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. + // + // * ProjectionExpression - One or more attributes to be retrieved from the + // table or index. By default, all attributes are returned. If a requested + // attribute is not found, it does not appear in the result. + // + // * ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. + // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. + UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput { + s.Responses = v + return s +} + +// SetUnprocessedKeys sets the UnprocessedKeys field's value. +func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput { + s.UnprocessedKeys = v + return s +} + +// An error associated with a statement in a PartiQL batch that was run. 
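+//
+// Each entry in BatchExecuteStatementOutput.Responses carries either an Item
+// or an Error of this type; an illustrative way to surface failures (out is a
+// placeholder for a previously obtained BatchExecuteStatementOutput):
+//
+//	for i, r := range out.Responses {
+//		if r.Error != nil {
+//			fmt.Printf("statement %d failed: %s: %s\n", i,
+//				aws.StringValue(r.Error.Code), aws.StringValue(r.Error.Message))
+//		}
+//	}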
+type BatchStatementError struct { + _ struct{} `type:"structure"` + + // The error code associated with the failed PartiQL batch statement. + Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"` + + // The error message associated with the PartiQL batch response. + Message *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *BatchStatementError) SetCode(v string) *BatchStatementError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *BatchStatementError) SetMessage(v string) *BatchStatementError { + s.Message = &v + return s +} + +// A PartiQL batch statement request. +type BatchStatementRequest struct { + _ struct{} `type:"structure"` + + // The read consistency of the PartiQL batch request. + ConsistentRead *bool `type:"boolean"` + + // The parameters associated with a PartiQL statement in the batch request. + Parameters []*AttributeValue `min:"1" type:"list"` + + // A valid PartiQL statement. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchStatementRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchStatementRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchStatementRequest"} + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *BatchStatementRequest) SetConsistentRead(v bool) *BatchStatementRequest { + s.ConsistentRead = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *BatchStatementRequest) SetParameters(v []*AttributeValue) *BatchStatementRequest { + s.Parameters = v + return s +} + +// SetStatement sets the Statement field's value. 
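+//
+// An illustrative PartiQL request built with the generated setters (the
+// statement text and parameter value are placeholders):
+//
+//	req := (&BatchStatementRequest{}).
+//		SetStatement("SELECT * FROM Music WHERE Artist = ?").
+//		SetParameters([]*AttributeValue{{S: aws.String("No One You Know")}})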
+func (s *BatchStatementRequest) SetStatement(v string) *BatchStatementRequest {
+	s.Statement = &v
+	return s
+}
+
+// A PartiQL batch statement response.
+type BatchStatementResponse struct {
+	_ struct{} `type:"structure"`
+
+	// The error associated with a failed PartiQL batch statement.
+	Error *BatchStatementError `type:"structure"`
+
+	// A DynamoDB item associated with a BatchStatementResponse.
+	Item map[string]*AttributeValue `type:"map"`
+
+	// The table name associated with a failed PartiQL batch statement.
+	TableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchStatementResponse) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchStatementResponse) GoString() string {
+	return s.String()
+}
+
+// SetError sets the Error field's value.
+func (s *BatchStatementResponse) SetError(v *BatchStatementError) *BatchStatementResponse {
+	s.Error = v
+	return s
+}
+
+// SetItem sets the Item field's value.
+func (s *BatchStatementResponse) SetItem(v map[string]*AttributeValue) *BatchStatementResponse {
+	s.Item = v
+	return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *BatchStatementResponse) SetTableName(v string) *BatchStatementResponse {
+	s.TableName = &v
+	return s
+}
+
+// Represents the input of a BatchWriteItem operation.
+type BatchWriteItemInput struct {
+	_ struct{} `type:"structure"`
+
+	// A map of one or more table names and, for each table, a list of operations
+	// to be performed (DeleteRequest or PutRequest). Each element in the map consists
+	// of the following:
+	//
+	//    * DeleteRequest - Perform a DeleteItem operation on the specified item.
+	//    The item to be deleted is identified by a Key subelement: Key - A map
+	//    of primary key attribute values that uniquely identify the item. Each
+	//    entry in this map consists of an attribute name and an attribute value.
+	//    For each primary key, you must provide all of the key attributes. For
+	//    example, with a simple primary key, you only need to provide a value for
+	//    the partition key. For a composite primary key, you must provide values
+	//    for both the partition key and the sort key.
+	//
+	//    * PutRequest - Perform a PutItem operation on the specified item. The
+	//    item to be put is identified by an Item subelement: Item - A map of attributes
+	//    and their values. Each entry in this map consists of an attribute name
+	//    and an attribute value. Attribute values must not be null; string and
+	//    binary type attributes must have lengths greater than zero; and set type
+	//    attributes must not be empty. Requests that contain empty values are rejected
+	//    with a ValidationException exception. If you specify any attributes that
+	//    are part of an index key, then the data types for those attributes must
+	//    match those of the schema in the table's attribute definition.
+	//
+	// RequestItems is a required field
+	RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"`
+
+	// Determines the level of detail about either provisioned or on-demand throughput
+	// consumption that is returned in the response:
+	//
+	//    * INDEXES - The response includes the aggregate ConsumedCapacity for the
+	//    operation, together with ConsumedCapacity for each table and secondary
+	//    index that was accessed. Note that some operations, such as GetItem and
+	//    BatchGetItem, do not access any indexes at all. In these cases, specifying
+	//    INDEXES will only return ConsumedCapacity information for table(s).
+	//
+	//    * TOTAL - The response includes only the aggregate ConsumedCapacity for
+	//    the operation.
+	//
+	//    * NONE - No ConsumedCapacity details are included in the response.
+	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+	// Determines whether item collection metrics are returned. If set to SIZE,
+	// the response includes statistics about item collections, if any, that were
+	// modified during the operation. If set to NONE (the default), no statistics
+	// are returned.
+	ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchWriteItemInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BatchWriteItemInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchWriteItemInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"}
+	if s.RequestItems == nil {
+		invalidParams.Add(request.NewErrParamRequired("RequestItems"))
+	}
+	if s.RequestItems != nil && len(s.RequestItems) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRequestItems sets the RequestItems field's value.
+func (s *BatchWriteItemInput) SetRequestItems(v map[string][]*WriteRequest) *BatchWriteItemInput {
+	s.RequestItems = v
+	return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *BatchWriteItemInput) SetReturnConsumedCapacity(v string) *BatchWriteItemInput {
+	s.ReturnConsumedCapacity = &v
+	return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *BatchWriteItemInput) SetReturnItemCollectionMetrics(v string) *BatchWriteItemInput {
+	s.ReturnItemCollectionMetrics = &v
+	return s
+}
+
+// Represents the output of a BatchWriteItem operation.
+type BatchWriteItemOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The capacity units consumed by the entire BatchWriteItem operation.
+	//
+	// Each element consists of:
+	//
+	//    * TableName - The table that consumed the provisioned throughput.
+	//
+	//    * CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A list of tables that were processed by BatchWriteItem and, for each table, + // information about any item collections that were affected by individual DeleteItem + // or PutItem operations. + // + // Each entry consists of the following subelements: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item. + // + // * SizeEstimateRangeGB - An estimate of item collection size, expressed + // in GB. This is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on the table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` + + // A map of tables and requests against those tables that were not processed. + // The UnprocessedItems value is in the same form as RequestItems, so you can + // provide this value directly to a subsequent BatchWriteItem operation. For + // more information, see RequestItems in the Request Parameters section. + // + // Each UnprocessedItems entry consists of a table name and, for that table, + // a list of operations to perform (DeleteRequest or PutRequest). + // + // * DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. + // + // * PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values will + // be rejected with a ValidationException exception. If you specify any attributes + // that are part of an index key, then the data types for those attributes + // must match those of the schema in the table's attribute definition. + // + // If there are no unprocessed items remaining, the response contains an empty + // UnprocessedItems map. + UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchWriteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchWriteItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
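+//
+// UnprocessedItems is shaped so it can be fed straight back into a follow-up
+// call; an illustrative retry sketch (svc, out, and err are placeholders for a
+// client, a prior BatchWriteItemOutput, and its error, and production code
+// should also back off between attempts):
+//
+//	for len(out.UnprocessedItems) > 0 {
+//		in := (&BatchWriteItemInput{}).SetRequestItems(out.UnprocessedItems)
+//		if out, err = svc.BatchWriteItem(in); err != nil {
+//			break
+//		}
+//	}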
+func (s *BatchWriteItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchWriteItemOutput {
+	s.ConsumedCapacity = v
+	return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *BatchWriteItemOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *BatchWriteItemOutput {
+	s.ItemCollectionMetrics = v
+	return s
+}
+
+// SetUnprocessedItems sets the UnprocessedItems field's value.
+func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) *BatchWriteItemOutput {
+	s.UnprocessedItems = v
+	return s
+}
+
+// Contains the details for the read/write capacity mode. This summary describes
+// the PROVISIONED and PAY_PER_REQUEST billing modes. For more information about
+// these modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html).
+//
+// You may need to switch to on-demand mode at least once in order to return
+// a BillingModeSummary response.
+type BillingModeSummary struct {
+	_ struct{} `type:"structure"`
+
+	// Controls how you are charged for read and write throughput and how you manage
+	// capacity. This setting can be changed later.
+	//
+	//    * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
+	//    using PROVISIONED for predictable workloads.
+	//
+	//    * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
+	//    We recommend using PAY_PER_REQUEST for unpredictable workloads.
+	BillingMode *string `type:"string" enum:"BillingMode"`
+
+	// Represents the time when PAY_PER_REQUEST was last set as the read/write capacity
+	// mode.
+	LastUpdateToPayPerRequestDateTime *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BillingModeSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BillingModeSummary) GoString() string {
+	return s.String()
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *BillingModeSummary) SetBillingMode(v string) *BillingModeSummary {
+	s.BillingMode = &v
+	return s
+}
+
+// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value.
+func (s *BillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v time.Time) *BillingModeSummary {
+	s.LastUpdateToPayPerRequestDateTime = &v
+	return s
+}
+
+// An ordered list of errors for each item in the request that caused the transaction
+// to be cancelled. The values of the list are ordered according to the ordering
+// of the TransactWriteItems request parameter. If no error occurred for the
+// associated item, an error with a Null code and Null message will be present.
+type CancellationReason struct {
+	_ struct{} `type:"structure"`
+
+	// Status code for the result of the cancelled transaction.
+	Code *string `type:"string"`
+
+	// Item in the request that caused the transaction to be cancelled.
+	Item map[string]*AttributeValue `type:"map"`
+
+	// Cancellation reason message description.
+ Message *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancellationReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancellationReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *CancellationReason) SetCode(v string) *CancellationReason { + s.Code = &v + return s +} + +// SetItem sets the Item field's value. +func (s *CancellationReason) SetItem(v map[string]*AttributeValue) *CancellationReason { + s.Item = v + return s +} + +// SetMessage sets the Message field's value. +func (s *CancellationReason) SetMessage(v string) *CancellationReason { + s.Message = &v + return s +} + +// Represents the amount of provisioned throughput capacity consumed on a table +// or an index. +type Capacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed on a table or an index. + CapacityUnits *float64 `type:"double"` + + // The total number of read capacity units consumed on a table or an index. + ReadCapacityUnits *float64 `type:"double"` + + // The total number of write capacity units consumed on a table or an index. + WriteCapacityUnits *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Capacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Capacity) GoString() string { + return s.String() +} + +// SetCapacityUnits sets the CapacityUnits field's value. +func (s *Capacity) SetCapacityUnits(v float64) *Capacity { + s.CapacityUnits = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *Capacity) SetReadCapacityUnits(v float64) *Capacity { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity { + s.WriteCapacityUnits = &v + return s +} + +// Represents the selection criteria for a Query or Scan operation: +// +// - For a Query operation, Condition is used for specifying the KeyConditions +// to use when querying a table or an index. For KeyConditions, only the +// following comparison operators are supported: EQ | LE | LT | GE | GT | +// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates +// the query results and returns only the desired values. +// +// - For a Scan operation, Condition is used in a ScanFilter, which evaluates +// the scan results and returns only the desired values. +type Condition struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. 
The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes. For example, equals, greater than, + // less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // * EQ : Equal. EQ is supported for all data types, including lists and + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. + // + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. 
Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). 
A target attribute matches if the target value is greater than, + // or equal to, the first element and less than, or equal to, the second + // element. If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator, see Legacy + // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + // + // ComparisonOperator is a required field + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Condition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Condition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Condition"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeValueList sets the AttributeValueList field's value. +func (s *Condition) SetAttributeValueList(v []*AttributeValue) *Condition { + s.AttributeValueList = v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *Condition) SetComparisonOperator(v string) *Condition { + s.ComparisonOperator = &v + return s +} + +// Represents a request to perform a check that an item exists or to check the +// condition of specific attributes of the item. +type ConditionCheck struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // For more information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) + // in the Amazon DynamoDB Developer Guide. + // + // ConditionExpression is a required field + ConditionExpression *string `type:"string" required:"true"` + + // One or more substitution tokens for attribute names in an expression. For + // more information, see Expression attribute names (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. For more information, + // see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be checked. 
Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure, + // the valid values are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table for the check item request. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionCheck) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConditionCheck) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConditionCheck"} + if s.ConditionExpression == nil { + invalidParams.Add(request.NewErrParamRequired("ConditionExpression")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *ConditionCheck) SetConditionExpression(v string) *ConditionCheck { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *ConditionCheck) SetExpressionAttributeNames(v map[string]*string) *ConditionCheck { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *ConditionCheck) SetExpressionAttributeValues(v map[string]*AttributeValue) *ConditionCheck { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *ConditionCheck) SetKey(v map[string]*AttributeValue) *ConditionCheck { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *ConditionCheck) SetReturnValuesOnConditionCheckFailure(v string) *ConditionCheck { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ConditionCheck) SetTableName(v string) *ConditionCheck { + s.TableName = &v + return s +} + +// A condition specified in the operation could not be evaluated. +type ConditionalCheckFailedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The conditional request failed. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionalCheckFailedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConditionalCheckFailedException) GoString() string { + return s.String() +} + +func newErrorConditionalCheckFailedException(v protocol.ResponseMetadata) error { + return &ConditionalCheckFailedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConditionalCheckFailedException) Code() string { + return "ConditionalCheckFailedException" +} + +// Message returns the exception's message. +func (s *ConditionalCheckFailedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConditionalCheckFailedException) OrigErr() error { + return nil +} + +func (s *ConditionalCheckFailedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConditionalCheckFailedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConditionalCheckFailedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The capacity units consumed by an operation. The data returned includes the +// total provisioned throughput consumed, along with statistics for the table +// and any indexes involved in the operation. ConsumedCapacity is only returned +// if the request asked for it. For more information, see Provisioned Throughput +// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// in the Amazon DynamoDB Developer Guide. +type ConsumedCapacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed by the operation. + CapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on each global index affected by the operation. + GlobalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on each local index affected by the operation. + LocalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The total number of read capacity units consumed by the operation. + ReadCapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on the table affected by the operation. + Table *Capacity `type:"structure"` + + // The name of the table that was affected by the operation. + TableName *string `min:"3" type:"string"` + + // The total number of write capacity units consumed by the operation. + WriteCapacityUnits *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConsumedCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ConsumedCapacity) GoString() string { + return s.String() +} + +// SetCapacityUnits sets the CapacityUnits field's value. +func (s *ConsumedCapacity) SetCapacityUnits(v float64) *ConsumedCapacity { + s.CapacityUnits = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *ConsumedCapacity) SetGlobalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { + s.GlobalSecondaryIndexes = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *ConsumedCapacity) SetLocalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { + s.LocalSecondaryIndexes = v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ConsumedCapacity) SetReadCapacityUnits(v float64) *ConsumedCapacity { + s.ReadCapacityUnits = &v + return s +} + +// SetTable sets the Table field's value. +func (s *ConsumedCapacity) SetTable(v *Capacity) *ConsumedCapacity { + s.Table = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ConsumedCapacity) SetTableName(v string) *ConsumedCapacity { + s.TableName = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ConsumedCapacity) SetWriteCapacityUnits(v float64) *ConsumedCapacity { + s.WriteCapacityUnits = &v + return s +} + +// Represents the continuous backups and point in time recovery settings on +// the table. +type ContinuousBackupsDescription struct { + _ struct{} `type:"structure"` + + // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED + // + // ContinuousBackupsStatus is a required field + ContinuousBackupsStatus *string `type:"string" required:"true" enum:"ContinuousBackupsStatus"` + + // The description of the point in time recovery settings applied to the table. + PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsDescription) GoString() string { + return s.String() +} + +// SetContinuousBackupsStatus sets the ContinuousBackupsStatus field's value. +func (s *ContinuousBackupsDescription) SetContinuousBackupsStatus(v string) *ContinuousBackupsDescription { + s.ContinuousBackupsStatus = &v + return s +} + +// SetPointInTimeRecoveryDescription sets the PointInTimeRecoveryDescription field's value. +func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *PointInTimeRecoveryDescription) *ContinuousBackupsDescription { + s.PointInTimeRecoveryDescription = v + return s +} + +// Backups have not yet been enabled for this table. 
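+//
+// An illustrative check for this condition using the standard errors package
+// (err is a placeholder for the error returned by a continuous-backups
+// operation):
+//
+//	var cbuErr *ContinuousBackupsUnavailableException
+//	if errors.As(err, &cbuErr) {
+//		// point-in-time recovery has not been enabled for the table yet
+//	}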
+type ContinuousBackupsUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsUnavailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContinuousBackupsUnavailableException) GoString() string { + return s.String() +} + +func newErrorContinuousBackupsUnavailableException(v protocol.ResponseMetadata) error { + return &ContinuousBackupsUnavailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ContinuousBackupsUnavailableException) Code() string { + return "ContinuousBackupsUnavailableException" +} + +// Message returns the exception's message. +func (s *ContinuousBackupsUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ContinuousBackupsUnavailableException) OrigErr() error { + return nil +} + +func (s *ContinuousBackupsUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ContinuousBackupsUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ContinuousBackupsUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents a Contributor Insights summary entry. +type ContributorInsightsSummary struct { + _ struct{} `type:"structure"` + + // Describes the current status for contributor insights for the given table + // and index, if applicable. + ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` + + // Name of the index associated with the summary, if any. + IndexName *string `min:"3" type:"string"` + + // Name of the table associated with the summary. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContributorInsightsSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ContributorInsightsSummary) GoString() string { + return s.String() +} + +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. 
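+//
+// An illustrative read of a summary entry (cis is a placeholder for a
+// ContributorInsightsSummary value):
+//
+//	fmt.Printf("%s/%s: %s\n", aws.StringValue(cis.TableName),
+//		aws.StringValue(cis.IndexName), aws.StringValue(cis.ContributorInsightsStatus))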
+func (s *ContributorInsightsSummary) SetContributorInsightsStatus(v string) *ContributorInsightsSummary { + s.ContributorInsightsStatus = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *ContributorInsightsSummary) SetIndexName(v string) *ContributorInsightsSummary { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ContributorInsightsSummary) SetTableName(v string) *ContributorInsightsSummary { + s.TableName = &v + return s +} + +type CreateBackupInput struct { + _ struct{} `type:"structure"` + + // Specified name for the backup. + // + // BackupName is a required field + BackupName *string `min:"3" type:"string" required:"true"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"} + if s.BackupName == nil { + invalidParams.Add(request.NewErrParamRequired("BackupName")) + } + if s.BackupName != nil && len(*s.BackupName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BackupName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupName sets the BackupName field's value. +func (s *CreateBackupInput) SetBackupName(v string) *CreateBackupInput { + s.BackupName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateBackupInput) SetTableName(v string) *CreateBackupInput { + s.TableName = &v + return s +} + +type CreateBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the backup created for the table. + BackupDetails *BackupDetails `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDetails sets the BackupDetails field's value. 
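+//
+// An illustrative read of the new backup's ARN (out is a placeholder for a
+// CreateBackupOutput; BackupDetails is defined elsewhere in this package):
+//
+//	arn := aws.StringValue(out.BackupDetails.BackupArn)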
+func (s *CreateBackupOutput) SetBackupDetails(v *BackupDetails) *CreateBackupOutput { + s.BackupDetails = v + return s +} + +// Represents a new global secondary index to be added to an existing table. +type CreateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be created. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The key schema for the global secondary index. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. 
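+//
+// Editor's sketch, not generated code: the Set* methods return the receiver,
+// so an action can be built fluently; the index, attribute, and key names
+// below are hypothetical.
+//
+//	action := (&CreateGlobalSecondaryIndexAction{}).
+//		SetIndexName("ArtistIndex").
+//		SetKeySchema([]*KeySchemaElement{
+//			{AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
+//		}).
+//		SetProjection(&Projection{ProjectionType: aws.String("ALL")})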
+func (s *CreateGlobalSecondaryIndexAction) SetIndexName(v string) *CreateGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) *CreateGlobalSecondaryIndexAction { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *CreateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateGlobalSecondaryIndexAction { + s.ProvisionedThroughput = v + return s +} + +type CreateGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The global table name. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // The Regions where the global table needs to be created. + // + // ReplicationGroup is a required field + ReplicationGroup []*Replica `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.ReplicationGroup == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *CreateGlobalTableInput) SetGlobalTableName(v string) *CreateGlobalTableInput { + s.GlobalTableName = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CreateGlobalTableInput) SetReplicationGroup(v []*Replica) *CreateGlobalTableInput { + s.ReplicationGroup = v + return s +} + +type CreateGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
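+//
+// Editor's sketch, not generated code: logging a response via GoString; svc is
+// an assumed *DynamoDB client.
+//
+//	out, err := svc.CreateGlobalTable(input) // input is a *CreateGlobalTableInput
+//	if err == nil {
+//		log.Println(out.GoString())
+//	}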
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *CreateGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +// Represents a replica to be added. +type CreateReplicaAction struct { + _ struct{} `type:"structure"` + + // The Region of the replica to be added. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *CreateReplicaAction) SetRegionName(v string) *CreateReplicaAction { + s.RegionName = &v + return s +} + +// Represents a replica to be created. +type CreateReplicationGroupMemberAction struct { + _ struct{} `type:"structure"` + + // Replica-specific global secondary index settings. + GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` + + // The KMS key that should be used for KMS encryption in the new replica. To + // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or + // alias ARN. Note that you should only provide this parameter if the key is + // different from the default DynamoDB KMS key alias/aws/dynamodb. + KMSMasterKeyId *string `type:"string"` + + // Replica-specific provisioned throughput. If not specified, uses the source + // table's provisioned throughput settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` + + // The Region where the new replica will be created. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Replica-specific table class. If not specified, uses the source table's table + // class. + TableClassOverride *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicationGroupMemberAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateReplicationGroupMemberAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupMemberAction"} + if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1)) + } + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *CreateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *CreateReplicationGroupMemberAction { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *CreateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *CreateReplicationGroupMemberAction { + s.KMSMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *CreateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *CreateReplicationGroupMemberAction { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *CreateReplicationGroupMemberAction) SetRegionName(v string) *CreateReplicationGroupMemberAction { + s.RegionName = &v + return s +} + +// SetTableClassOverride sets the TableClassOverride field's value. +func (s *CreateReplicationGroupMemberAction) SetTableClassOverride(v string) *CreateReplicationGroupMemberAction { + s.TableClassOverride = &v + return s +} + +// Represents the input of a CreateTable operation. +type CreateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // + // AttributeDefinitions is a required field + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. 
PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + BillingMode *string `type:"string" enum:"BillingMode"` + + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool `type:"boolean"` + + // One or more global secondary indexes (the maximum is 20) to be created on + // the table. Each global secondary index in the array includes the following: + // + // * IndexName - The name of the global secondary index. Must be unique only + // for this table. + // + // * KeySchema - Specifies the key schema for the global secondary index. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // Specifies the attributes that make up the primary key for a table or an index. + // The attributes in KeySchema must also be defined in the AttributeDefinitions + // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) + // in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // + // * AttributeName - The name of this key attribute. + // + // * KeyType - The role that the key attribute will assume: HASH - partition + // key RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from the DynamoDB usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For a simple primary key (partition key), you must provide exactly one element + // with a KeyType of HASH. + // + // For a composite primary key (partition key and sort key), you must provide + // exactly two elements, in this order: The first element must have a KeyType + // of HASH, and the second element must have a KeyType of RANGE. 
+ // + // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + // in the Amazon DynamoDB Developer Guide. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // One or more local secondary indexes (the maximum is 5) to be created on the + // table. Each index is scoped to a given partition key value. There is a 10 + // GB size limit per partition key value; otherwise, the size of a local secondary + // index is unconstrained. + // + // Each local secondary index in the array includes the following: + // + // * IndexName - The name of the local secondary index. Must be unique only + // for this table. + // + // * KeySchema - Specifies the key schema for the local secondary index. + // The key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // If you set BillingMode as PROVISIONED, you must specify this property. If + // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the settings used to enable server-side encryption. + SSESpecification *SSESpecification `type:"structure"` + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled + // (true) or disabled (false). + // + // * StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values + // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified + // item are written to the stream. NEW_IMAGE - The entire item, as it appears + // after it was modified, is written to the stream. OLD_IMAGE - The entire + // item, as it appeared before it was modified, is written to the stream. + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. 
+ StreamSpecification *StreamSpecification `type:"structure"` + + // The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. + TableClass *string `type:"string" enum:"TableClass"` + + // The name of the table to create. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // A list of key-value pairs to label the table. For more information, see Tagging + // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexes != nil { + for i, v := range s.LocalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + if s.StreamSpecification != nil { + if err := s.StreamSpecification.Validate(); err != nil { + invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { + s.BillingMode = &v + return s +} + +// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. +func (s *CreateTableInput) SetDeletionProtectionEnabled(v bool) *CreateTableInput { + s.DeletionProtectionEnabled = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput { + s.KeySchema = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableClass sets the TableClass field's value. +func (s *CreateTableInput) SetTableClass(v string) *CreateTableInput { + s.TableClass = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateTableInput) SetTableName(v string) *CreateTableInput { + s.TableName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput { + s.Tags = v + return s +} + +// Represents the output of a CreateTable operation. +type CreateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. 
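+//
+// Editor's sketch, not generated code: callers typically read the description
+// from the CreateTable response and wait for the table to become ACTIVE; svc
+// is an assumed *DynamoDB client.
+//
+//	out, err := svc.CreateTable(input) // input is a *CreateTableInput
+//	if err == nil && out.TableDescription != nil {
+//		fmt.Println(aws.StringValue(out.TableDescription.TableStatus))
+//	}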
+func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput {
+	s.TableDescription = v
+	return s
+}
+
+// Processing options for the CSV file being imported.
+type CsvOptions struct {
+	_ struct{} `type:"structure"`
+
+	// The delimiter used for separating items in the CSV file being imported.
+	Delimiter *string `min:"1" type:"string"`
+
+	// List of the headers used to specify a common header for all source CSV files
+	// being imported. If this field is specified then the first line of each CSV
+	// file is treated as data instead of the header. If this field is not specified,
+	// the first line of each CSV file is treated as the header.
+	HeaderList []*string `min:"1" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CsvOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CsvOptions) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CsvOptions) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CsvOptions"}
+	if s.Delimiter != nil && len(*s.Delimiter) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1))
+	}
+	if s.HeaderList != nil && len(s.HeaderList) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("HeaderList", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *CsvOptions) SetDelimiter(v string) *CsvOptions {
+	s.Delimiter = &v
+	return s
+}
+
+// SetHeaderList sets the HeaderList field's value.
+func (s *CsvOptions) SetHeaderList(v []*string) *CsvOptions {
+	s.HeaderList = v
+	return s
+}
+
+// Represents a request to perform a DeleteItem operation.
+type Delete struct {
+	_ struct{} `type:"structure"`
+
+	// A condition that must be satisfied in order for a conditional delete to succeed.
+	ConditionExpression *string `type:"string"`
+
+	// One or more substitution tokens for attribute names in an expression.
+	ExpressionAttributeNames map[string]*string `type:"map"`
+
+	// One or more values that can be substituted in an expression.
+	ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+	// The primary key of the item to be deleted. Each element consists of an attribute
+	// name and a value for that attribute.
+	//
+	// Key is a required field
+	Key map[string]*AttributeValue `type:"map" required:"true"`
+
+	// Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+	// Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid
+	// values are: NONE and ALL_OLD.
+	ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
+
+	// Name of the table in which the item to be deleted resides.
+	//
+	// TableName is a required field
+	TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation.
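+//
+// Editor's aside, not generated code: a Delete (the receiver here) usually
+// travels inside a TransactWriteItem; the table and key below are hypothetical.
+//
+//	item := &TransactWriteItem{
+//		Delete: &Delete{
+//			TableName: aws.String("Music"),
+//			Key: map[string]*AttributeValue{
+//				"Artist": {S: aws.String("No One You Know")},
+//			},
+//		},
+//	}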
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Delete) SetConditionExpression(v string) *Delete { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Delete) SetTableName(v string) *Delete { + s.TableName = &v + return s +} + +type DeleteBackupInput struct { + _ struct{} `type:"structure"` + + // The ARN associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
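+//
+// Editor's sketch, not generated code: Validate runs entirely client-side, so
+// malformed input fails before a request is sent; the short ARN below trips
+// the 37-character minimum.
+//
+//	in := &DeleteBackupInput{BackupArn: aws.String("arn:aws:dynamodb:bad")}
+//	if err := in.Validate(); err != nil {
+//		// err is a request.ErrInvalidParams listing each violated constraint
+//	}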
+func (s *DeleteBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput { + s.BackupArn = &v + return s +} + +type DeleteBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. +func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput { + s.BackupDescription = v + return s +} + +// Represents a global secondary index to be deleted from an existing table. +type DeleteGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be deleted. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// Represents the input of a DeleteItem operation. 
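+//
+// Editor's sketch, not generated code: a minimal conditional delete; svc is an
+// assumed *DynamoDB client, and the table, key, and expression are hypothetical.
+//
+//	_, err := svc.DeleteItem(&DeleteItemInput{
+//		TableName: aws.String("Music"),
+//		Key: map[string]*AttributeValue{
+//			"Artist": {S: aws.String("No One You Know")},
+//		},
+//		ConditionExpression: aws.String("attribute_exists(Artist)"),
+//	})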
+type DeleteItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional DeleteItem + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of
+	// the ProductStatus attribute was one of the following:
+	//
+	// Available | Backordered | Discontinued
+	//
+	// You would first need to specify ExpressionAttributeValues as follows:
+	//
+	// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+	// }
+	//
+	// You could then use these values in an expression, such as this:
+	//
+	// ProductStatus IN (:avail, :back, :disc)
+	//
+	// For more information on expression attribute values, see Condition Expressions
+	// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+	// in the Amazon DynamoDB Developer Guide.
+	ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+	// A map of attribute names to AttributeValue objects, representing the primary
+	// key of the item to delete.
+	//
+	// For the primary key, you must provide all of the key attributes. For example,
+	// with a simple primary key, you only need to provide a value for the partition
+	// key. For a composite primary key, you must provide values for both the partition
+	// key and the sort key.
+	//
+	// Key is a required field
+	Key map[string]*AttributeValue `type:"map" required:"true"`
+
+	// Determines the level of detail about either provisioned or on-demand throughput
+	// consumption that is returned in the response:
+	//
+	//    * INDEXES - The response includes the aggregate ConsumedCapacity for the
+	//    operation, together with ConsumedCapacity for each table and secondary
+	//    index that was accessed. Note that some operations, such as GetItem and
+	//    BatchGetItem, do not access any indexes at all. In these cases, specifying
+	//    INDEXES will only return ConsumedCapacity information for table(s).
+	//
+	//    * TOTAL - The response includes only the aggregate ConsumedCapacity for
+	//    the operation.
+	//
+	//    * NONE - No ConsumedCapacity details are included in the response.
+	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+	// Determines whether item collection metrics are returned. If set to SIZE,
+	// the response includes statistics about item collections, if any, that were
+	// modified during the operation. If set to NONE (the default), no statistics
+	// are returned.
+	ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+	// Use ReturnValues if you want to get the item attributes as they appeared
+	// before they were deleted. For DeleteItem, the valid values are:
+	//
+	//    * NONE - If ReturnValues is not specified, or if its value is NONE, then
+	//    nothing is returned. (This setting is the default for ReturnValues.)
+	//
+	//    * ALL_OLD - The content of the old item is returned.
+	//
+	// There is no additional cost associated with requesting a return value aside
+	// from the small network and processing overhead of receiving a larger response.
+	// No read capacity units are consumed.
+	//
+	// The ReturnValues parameter is used by several DynamoDB operations; however,
+	// DeleteItem does not recognize any values other than NONE or ALL_OLD.
+	ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+	// The name of the table from which to delete the item.
+	//
+	// TableName is a required field
+	TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput { + s.Key = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { + s.TableName = &v + return s +} + +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute names to AttributeValue objects, representing the item + // as it appeared before the DeleteItem operation. This map appears in the response + // only if ReturnValues was specified as ALL_OLD in the request. 
+ Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the DeleteItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the DeleteItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a replica to be removed. +type DeleteReplicaAction struct { + _ struct{} `type:"structure"` + + // The Region of the replica to be removed. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction { + s.RegionName = &v + return s +} + +// Represents a replica to be deleted. +type DeleteReplicationGroupMemberAction struct { + _ struct{} `type:"structure"` + + // The Region where the replica exists. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicationGroupMemberAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteReplicationGroupMemberAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupMemberAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicationGroupMemberAction) SetRegionName(v string) *DeleteReplicationGroupMemberAction { + s.RegionName = &v + return s +} + +// Represents a request to perform a DeleteItem operation on an item. +type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
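+//
+// Editor's aside, not generated code: a DeleteRequest is carried inside a
+// WriteRequest for BatchWriteItem; svc and the names below are hypothetical.
+//
+//	_, err := svc.BatchWriteItem(&BatchWriteItemInput{
+//		RequestItems: map[string][]*WriteRequest{
+//			"Music": {{DeleteRequest: &DeleteRequest{
+//				Key: map[string]*AttributeValue{
+//					"Artist": {S: aws.String("No One You Know")},
+//				},
+//			}}},
+//		},
+//	})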
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteRequest) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { + s.Key = v + return s +} + +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput { + s.TableDescription = v + return s +} + +type DescribeBackupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput { + s.BackupArn = &v + return s +} + +type DescribeBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. +func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput { + s.BackupDescription = v + return s +} + +type DescribeContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Name of the table for which the customer wants to check the continuous backups + // and point in time recovery settings. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
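The Delete*/Describe* shapes above all follow the same aws-sdk-go v1 request/response pattern: build the input (field literals or the Set* chain), invoke the operation on a *dynamodb.DynamoDB client, and read the typed output. A minimal sketch for DeleteTable — the "Music" table name and the default-session setup are illustrative assumptions, not part of the generated code:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Credentials and region come from the default chain (env vars, shared config, ...).
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DeleteTable(&dynamodb.DeleteTableInput{
		TableName: aws.String("Music"), // illustrative table name
	})
	if err != nil {
		log.Fatal(err)
	}
	// Deletion is asynchronous; the table passes through DELETING first.
	if out.TableDescription != nil {
		fmt.Println("status:", aws.StringValue(out.TableDescription.TableStatus))
	}
}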
+func (s *DescribeContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput { + s.TableName = &v + return s +} + +type DescribeContinuousBackupsOutput struct { + _ struct{} `type:"structure"` + + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContinuousBackupsOutput) GoString() string { + return s.String() +} + +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. +func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput { + s.ContinuousBackupsDescription = v + return s +} + +type DescribeContributorInsightsInput struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to describe, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table to describe. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
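Reusing the svc client from the DeleteTable sketch above, a short sketch of reading the continuous-backups and point-in-time-recovery status described by DescribeContinuousBackupsOutput (table name illustrative):

	out, err := svc.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{
		TableName: aws.String("Music"), // illustrative table name
	})
	if err != nil {
		log.Fatal(err)
	}
	if d := out.ContinuousBackupsDescription; d != nil {
		fmt.Println("continuous backups:", aws.StringValue(d.ContinuousBackupsStatus))
		if p := d.PointInTimeRecoveryDescription; p != nil {
			fmt.Println("PITR:", aws.StringValue(p.PointInTimeRecoveryStatus))
		}
	}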
+func (s *DescribeContributorInsightsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContributorInsightsInput"} + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *DescribeContributorInsightsInput) SetIndexName(v string) *DescribeContributorInsightsInput { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContributorInsightsInput { + s.TableName = &v + return s +} + +type DescribeContributorInsightsOutput struct { + _ struct{} `type:"structure"` + + // List of names of the associated contributor insights rules. + ContributorInsightsRuleList []*string `type:"list"` + + // Current status of contributor insights. + ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` + + // Returns information about the last failure that was encountered. + // + // The most common exceptions for a FAILED status are: + // + // * LimitExceededException - Per-account Amazon CloudWatch Contributor Insights + // rule limit reached. Please disable Contributor Insights for other tables/indexes + // OR disable Contributor Insights rules before retrying. + // + // * AccessDeniedException - Amazon CloudWatch Contributor Insights rules + // cannot be modified due to insufficient permissions. + // + // * AccessDeniedException - Failed to create service-linked role for Contributor + // Insights due to insufficient permissions. + // + // * InternalServerError - Failed to create Amazon CloudWatch Contributor + // Insights rules. Please retry request. + FailureException *FailureException `type:"structure"` + + // The name of the global secondary index being described. + IndexName *string `min:"3" type:"string"` + + // Timestamp of the last time the status was changed. + LastUpdateDateTime *time.Time `type:"timestamp"` + + // The name of the table being described. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeContributorInsightsOutput) GoString() string { + return s.String() +} + +// SetContributorInsightsRuleList sets the ContributorInsightsRuleList field's value. +func (s *DescribeContributorInsightsOutput) SetContributorInsightsRuleList(v []*string) *DescribeContributorInsightsOutput { + s.ContributorInsightsRuleList = v + return s +} + +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. 
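A sketch of checking Contributor Insights status and surfacing the FailureException detail described above (same assumed svc client; table name illustrative, IndexName omitted to describe the table itself):

	out, err := svc.DescribeContributorInsights(&dynamodb.DescribeContributorInsightsInput{
		TableName: aws.String("Music"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", aws.StringValue(out.ContributorInsightsStatus))
	// On a FAILED status, FailureException carries the exception name and detail.
	if fe := out.FailureException; fe != nil {
		fmt.Println("failure:", aws.StringValue(fe.ExceptionName),
			aws.StringValue(fe.ExceptionDescription))
	}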
+func (s *DescribeContributorInsightsOutput) SetContributorInsightsStatus(v string) *DescribeContributorInsightsOutput { + s.ContributorInsightsStatus = &v + return s +} + +// SetFailureException sets the FailureException field's value. +func (s *DescribeContributorInsightsOutput) SetFailureException(v *FailureException) *DescribeContributorInsightsOutput { + s.FailureException = v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *DescribeContributorInsightsOutput) SetIndexName(v string) *DescribeContributorInsightsOutput { + s.IndexName = &v + return s +} + +// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. +func (s *DescribeContributorInsightsOutput) SetLastUpdateDateTime(v time.Time) *DescribeContributorInsightsOutput { + s.LastUpdateDateTime = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContributorInsightsOutput) SetTableName(v string) *DescribeContributorInsightsOutput { + s.TableName = &v + return s +} + +type DescribeEndpointsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsInput) GoString() string { + return s.String() +} + +type DescribeEndpointsOutput struct { + _ struct{} `type:"structure"` + + // List of endpoints. + // + // Endpoints is a required field + Endpoints []*Endpoint `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeEndpointsOutput) GoString() string { + return s.String() +} + +// SetEndpoints sets the Endpoints field's value. +func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { + s.Endpoints = v + return s +} + +type DescribeExportInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the export. + // + // ExportArn is a required field + ExportArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"} + if s.ExportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExportArn")) + } + if s.ExportArn != nil && len(*s.ExportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExportArn sets the ExportArn field's value. +func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput { + s.ExportArn = &v + return s +} + +type DescribeExportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeExportOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput { + s.ExportDescription = v + return s +} + +type DescribeGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. 
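A sketch of polling an export's status with DescribeExport (same assumed svc client; the ARN below is a made-up placeholder — a real one is returned by ExportTableToPointInTime):

	out, err := svc.DescribeExport(&dynamodb.DescribeExportInput{
		// Hypothetical export ARN, for illustration only.
		ExportArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/export/01234567890123-abcdef01"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if d := out.ExportDescription; d != nil {
		fmt.Println("export status:", aws.StringValue(d.ExportStatus))
	}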
+func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput { + s.GlobalTableName = &v + return s +} + +type DescribeGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +type DescribeGlobalTableSettingsInput struct { + _ struct{} `type:"structure"` + + // The name of the global table to describe. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput { + s.GlobalTableName = &v + return s +} + +type DescribeGlobalTableSettingsOutput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeGlobalTableSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeGlobalTableSettingsOutput) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput { + s.GlobalTableName = &v + return s +} + +// SetReplicaSettings sets the ReplicaSettings field's value. +func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} + +type DescribeImportInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the table you're importing + // to. + // + // ImportArn is a required field + ImportArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImportInput"} + if s.ImportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ImportArn")) + } + if s.ImportArn != nil && len(*s.ImportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ImportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImportArn sets the ImportArn field's value. +func (s *DescribeImportInput) SetImportArn(v string) *DescribeImportInput { + s.ImportArn = &v + return s +} + +type DescribeImportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items + // were processed, and how many errors were encountered. + // + // ImportTableDescription is a required field + ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeImportOutput) GoString() string { + return s.String() +} + +// SetImportTableDescription sets the ImportTableDescription field's value. +func (s *DescribeImportOutput) SetImportTableDescription(v *ImportTableDescription) *DescribeImportOutput { + s.ImportTableDescription = v + return s +} + +type DescribeKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the table being described. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeKinesisStreamingDestinationInput) SetTableName(v string) *DescribeKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +type DescribeKinesisStreamingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The list of replica structures for the table being described. + KinesisDataStreamDestinations []*KinesisDataStreamDestination `type:"list"` + + // The name of the table being described. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationOutput) GoString() string { + return s.String() +} + +// SetKinesisDataStreamDestinations sets the KinesisDataStreamDestinations field's value. +func (s *DescribeKinesisStreamingDestinationOutput) SetKinesisDataStreamDestinations(v []*KinesisDataStreamDestination) *DescribeKinesisStreamingDestinationOutput { + s.KinesisDataStreamDestinations = v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *DescribeKinesisStreamingDestinationOutput) SetTableName(v string) *DescribeKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// Represents the input of a DescribeLimits operation. Has no content. +type DescribeLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeLimits operation. +type DescribeLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum total read capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum total write capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum read capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the read + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum write capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the write + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLimitsOutput) GoString() string { + return s.String() +} + +// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxReadCapacityUnits = &v + return s +} + +// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxWriteCapacityUnits = &v + return s +} + +// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxReadCapacityUnits = &v + return s +} + +// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value. 
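Since DescribeLimits takes no parameters, a sketch is just a bare call that prints the four quota values described above (same assumed svc client; returned values vary by account and Region):

	out, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("account max RCU:", aws.Int64Value(out.AccountMaxReadCapacityUnits))
	fmt.Println("account max WCU:", aws.Int64Value(out.AccountMaxWriteCapacityUnits))
	fmt.Println("per-table max RCU:", aws.Int64Value(out.TableMaxReadCapacityUnits))
	fmt.Println("per-table max WCU:", aws.Int64Value(out.TableMaxWriteCapacityUnits))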
+func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxWriteCapacityUnits = &v + return s +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to describe. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + _ struct{} `type:"structure"` + + // The properties of the table. + Table *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableOutput) GoString() string { + return s.String() +} + +// SetTable sets the Table field's value. +func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput { + s.Table = v + return s +} + +type DescribeTableReplicaAutoScalingInput struct { + _ struct{} `type:"structure"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableReplicaAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableReplicaAutoScalingInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTableReplicaAutoScalingInput) SetTableName(v string) *DescribeTableReplicaAutoScalingInput { + s.TableName = &v + return s +} + +type DescribeTableReplicaAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // Represents the auto scaling properties of the table. + TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTableReplicaAutoScalingOutput) GoString() string { + return s.String() +} + +// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. +func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *DescribeTableReplicaAutoScalingOutput { + s.TableAutoScalingDescription = v + return s +} + +type DescribeTimeToLiveInput struct { + _ struct{} `type:"structure"` + + // The name of the table to be described. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTimeToLiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput { + s.TableName = &v + return s +} + +type DescribeTimeToLiveOutput struct { + _ struct{} `type:"structure"` + + // The description of the Time to Live (TTL) status on the specified table. + TimeToLiveDescription *TimeToLiveDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTimeToLiveOutput) GoString() string { + return s.String() +} + +// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. +func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput { + s.TimeToLiveDescription = v + return s +} + +type DisableKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN for a Kinesis data stream. + // + // StreamArn is a required field + StreamArn *string `min:"37" type:"string" required:"true"` + + // The name of the DynamoDB table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
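A sketch of reading a table's TTL configuration via DescribeTimeToLive (same assumed svc client; table name illustrative):

	out, err := svc.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{
		TableName: aws.String("Music"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if d := out.TimeToLiveDescription; d != nil {
		fmt.Println("TTL status:", aws.StringValue(d.TimeToLiveStatus))
		fmt.Println("TTL attribute:", aws.StringValue(d.AttributeName))
	}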
+func (s *DisableKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableKinesisStreamingDestinationInput"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamArn sets the StreamArn field's value. +func (s *DisableKinesisStreamingDestinationInput) SetStreamArn(v string) *DisableKinesisStreamingDestinationInput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DisableKinesisStreamingDestinationInput) SetTableName(v string) *DisableKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +type DisableKinesisStreamingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The current status of the replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The ARN for the specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` + + // The name of the table being modified. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableKinesisStreamingDestinationOutput) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *DisableKinesisStreamingDestinationOutput { + s.DestinationStatus = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetStreamArn(v string) *DisableKinesisStreamingDestinationOutput { + s.StreamArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetTableName(v string) *DisableKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// There was an attempt to insert an item with the same primary key as an item +// that already exists in the DynamoDB table. +type DuplicateItemException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DuplicateItemException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DuplicateItemException) GoString() string {
+	return s.String()
+}
+
+func newErrorDuplicateItemException(v protocol.ResponseMetadata) error {
+	return &DuplicateItemException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *DuplicateItemException) Code() string {
+	return "DuplicateItemException"
+}
+
+// Message returns the exception's message.
+func (s *DuplicateItemException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *DuplicateItemException) OrigErr() error {
+	return nil
+}
+
+func (s *DuplicateItemException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *DuplicateItemException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *DuplicateItemException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type EnableKinesisStreamingDestinationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN for a Kinesis data stream.
+	//
+	// StreamArn is a required field
+	StreamArn *string `min:"37" type:"string" required:"true"`
+
+	// The name of the DynamoDB table.
+	//
+	// TableName is a required field
+	TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableKinesisStreamingDestinationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableKinesisStreamingDestinationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EnableKinesisStreamingDestinationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "EnableKinesisStreamingDestinationInput"}
+	if s.StreamArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("StreamArn"))
+	}
+	if s.StreamArn != nil && len(*s.StreamArn) < 37 {
+		invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37))
+	}
+	if s.TableName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TableName"))
+	}
+	if s.TableName != nil && len(*s.TableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetStreamArn sets the StreamArn field's value.
+func (s *EnableKinesisStreamingDestinationInput) SetStreamArn(v string) *EnableKinesisStreamingDestinationInput {
+	s.StreamArn = &v
+	return s
+}
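A sketch of turning on Kinesis streaming for a table; the call is asynchronous, so DestinationStatus initially reports ENABLING (same assumed svc client; the stream ARN and table name are made-up placeholders):

	out, err := svc.EnableKinesisStreamingDestination(&dynamodb.EnableKinesisStreamingDestinationInput{
		StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"), // hypothetical
		TableName: aws.String("Music"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("destination status:", aws.StringValue(out.DestinationStatus))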
+
+// SetTableName sets the TableName field's value.
+func (s *EnableKinesisStreamingDestinationInput) SetTableName(v string) *EnableKinesisStreamingDestinationInput {
+	s.TableName = &v
+	return s
+}
+
+type EnableKinesisStreamingDestinationOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The current status of the replication.
+	DestinationStatus *string `type:"string" enum:"DestinationStatus"`
+
+	// The ARN for the specific Kinesis data stream.
+	StreamArn *string `min:"37" type:"string"`
+
+	// The name of the table being modified.
+	TableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableKinesisStreamingDestinationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableKinesisStreamingDestinationOutput) GoString() string {
+	return s.String()
+}
+
+// SetDestinationStatus sets the DestinationStatus field's value.
+func (s *EnableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *EnableKinesisStreamingDestinationOutput {
+	s.DestinationStatus = &v
+	return s
+}
+
+// SetStreamArn sets the StreamArn field's value.
+func (s *EnableKinesisStreamingDestinationOutput) SetStreamArn(v string) *EnableKinesisStreamingDestinationOutput {
+	s.StreamArn = &v
+	return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *EnableKinesisStreamingDestinationOutput) SetTableName(v string) *EnableKinesisStreamingDestinationOutput {
+	s.TableName = &v
+	return s
+}
+
+// Details about an endpoint.
+type Endpoint struct {
+	_ struct{} `type:"structure"`
+
+	// IP address of the endpoint.
+	//
+	// Address is a required field
+	Address *string `type:"string" required:"true"`
+
+	// Endpoint cache time to live (TTL) value.
+	//
+	// CachePeriodInMinutes is a required field
+	CachePeriodInMinutes *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Endpoint) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Endpoint) GoString() string {
+	return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *Endpoint) SetAddress(v string) *Endpoint {
+	s.Address = &v
+	return s
+}
+
+// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value.
+func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint {
+	s.CachePeriodInMinutes = &v
+	return s
+}
+
+type ExecuteStatementInput struct {
+	_ struct{} `type:"structure"`
+
+	// The consistency of a read operation. If set to true, then a strongly consistent
+	// read is used; otherwise, an eventually consistent read is used.
+ ConsistentRead *bool `type:"boolean"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, along with a key in LastEvaluatedKey to apply in a subsequent + // operation so you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. + Limit *int64 `min:"1" type:"integer"` + + // Set this value to get remaining results, if NextToken was returned in the + // statement response. + NextToken *string `min:"1" type:"string"` + + // The parameters for the PartiQL statement, if any. + Parameters []*AttributeValue `min:"1" type:"list"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The PartiQL statement representing the operation to run. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExecuteStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecuteStatementInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConsistentRead sets the ConsistentRead field's value. 
+func (s *ExecuteStatementInput) SetConsistentRead(v bool) *ExecuteStatementInput {
+	s.ConsistentRead = &v
+	return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ExecuteStatementInput) SetLimit(v int64) *ExecuteStatementInput {
+	s.Limit = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ExecuteStatementInput) SetNextToken(v string) *ExecuteStatementInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetParameters sets the Parameters field's value.
+func (s *ExecuteStatementInput) SetParameters(v []*AttributeValue) *ExecuteStatementInput {
+	s.Parameters = v
+	return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *ExecuteStatementInput) SetReturnConsumedCapacity(v string) *ExecuteStatementInput {
+	s.ReturnConsumedCapacity = &v
+	return s
+}
+
+// SetStatement sets the Statement field's value.
+func (s *ExecuteStatementInput) SetStatement(v string) *ExecuteStatementInput {
+	s.Statement = &v
+	return s
+}
+
+type ExecuteStatementOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The capacity units consumed by an operation. The data returned includes the
+	// total provisioned throughput consumed, along with statistics for the table
+	// and any indexes involved in the operation. ConsumedCapacity is only returned
+	// if the request asked for it. For more information, see Provisioned Throughput
+	// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+	// in the Amazon DynamoDB Developer Guide.
+	ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+	// If a read operation was used, this property will contain the result of the
+	// read operation; a map of attribute names and their values. For write
+	// operations this value will be empty.
+	Items []map[string]*AttributeValue `type:"list"`
+
+	// The primary key of the item where the operation stopped, inclusive of the
+	// previous result set. Use this value to start a new operation, excluding this
+	// value in the new request. If LastEvaluatedKey is empty, then the "last page"
+	// of results has been processed and there is no more data to be retrieved.
+	// If LastEvaluatedKey is not empty, it does not necessarily mean that there
+	// is more data in the result set. The only way to know when you have reached
+	// the end of the result set is when LastEvaluatedKey is empty.
+	LastEvaluatedKey map[string]*AttributeValue `type:"map"`
+
+	// If the response of a read request exceeds the response payload limit DynamoDB
+	// will set this value in the response. If set, you can use this value
+	// in the subsequent request to get the remaining results.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteStatementOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteStatementOutput) GoString() string {
+	return s.String()
+}
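The Limit/LastEvaluatedKey/NextToken semantics above imply a paging loop: keep re-issuing the statement with the returned NextToken until it comes back nil. A sketch (same assumed svc client; the PartiQL statement and parameter value are illustrative):

	input := &dynamodb.ExecuteStatementInput{
		Statement:  aws.String(`SELECT * FROM "Music" WHERE Artist = ?`),
		Parameters: []*dynamodb.AttributeValue{{S: aws.String("No One You Know")}},
	}
	for {
		out, err := svc.ExecuteStatement(input)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("items in this page:", len(out.Items))
		if out.NextToken == nil {
			break // no more pages
		}
		input.NextToken = out.NextToken // resume where the last response stopped
	}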
+func (s *ExecuteStatementOutput) SetConsumedCapacity(v *ConsumedCapacity) *ExecuteStatementOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItems sets the Items field's value.
+func (s *ExecuteStatementOutput) SetItems(v []map[string]*AttributeValue) *ExecuteStatementOutput {
+ s.Items = v
+ return s
+}
+
+// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
+func (s *ExecuteStatementOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ExecuteStatementOutput {
+ s.LastEvaluatedKey = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ExecuteStatementOutput) SetNextToken(v string) *ExecuteStatementOutput {
+ s.NextToken = &v
+ return s
+}
+
+type ExecuteTransactionInput struct {
+ _ struct{} `type:"structure"`
+
+ // Providing a ClientRequestToken makes the call to ExecuteTransaction idempotent,
+ // meaning that multiple identical calls have the same effect as one single
+ // call.
+ ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response. For more information, see TransactGetItems
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html)
+ // and TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html).
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // The list of PartiQL statements representing the transaction to run.
+ //
+ // TransactStatements is a required field
+ TransactStatements []*ParameterizedStatement `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteTransactionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExecuteTransactionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ExecuteTransactionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ExecuteTransactionInput"}
+ if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
+ }
+ if s.TransactStatements == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransactStatements"))
+ }
+ if s.TransactStatements != nil && len(s.TransactStatements) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TransactStatements", 1))
+ }
+ if s.TransactStatements != nil {
+ for i, v := range s.TransactStatements {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactStatements", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
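+// A transaction is a list of ParameterizedStatement values that succeed or
+// fail as a unit. An illustrative sketch, not generated code; the client value
+// svc and the table name "Orders" are assumptions:
+//
+//  _, err := svc.ExecuteTransaction(&dynamodb.ExecuteTransactionInput{
+//      TransactStatements: []*dynamodb.ParameterizedStatement{
+//          {
+//              Statement: aws.String(`UPDATE "Orders" SET OrderStatus = ? WHERE OrderID = ?`),
+//              Parameters: []*dynamodb.AttributeValue{
+//                  {S: aws.String("SHIPPED")},
+//                  {S: aws.String("order-123")},
+//              },
+//          },
+//      },
+//  })
+//  // err is non-nil if any statement failed; no partial writes are applied.
+
+// SetClientRequestToken sets the ClientRequestToken field's value.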
+func (s *ExecuteTransactionInput) SetClientRequestToken(v string) *ExecuteTransactionInput { + s.ClientRequestToken = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *ExecuteTransactionInput) SetReturnConsumedCapacity(v string) *ExecuteTransactionInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTransactStatements sets the TransactStatements field's value. +func (s *ExecuteTransactionInput) SetTransactStatements(v []*ParameterizedStatement) *ExecuteTransactionInput { + s.TransactStatements = v + return s +} + +type ExecuteTransactionOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire operation. The values of the list + // are ordered according to the ordering of the statements. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // The response to a PartiQL transaction. + Responses []*ItemResponse `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteTransactionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecuteTransactionOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *ExecuteTransactionOutput) SetConsumedCapacity(v []*ConsumedCapacity) *ExecuteTransactionOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *ExecuteTransactionOutput) SetResponses(v []*ItemResponse) *ExecuteTransactionOutput { + s.Responses = v + return s +} + +// Represents a condition to be compared with an attribute value. This condition +// can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison +// evaluates to true, the operation succeeds; if not, the operation fails. You +// can use ExpectedAttributeValue in one of two different ways: +// +// - Use AttributeValueList to specify one or more values to compare against +// an attribute. Use ComparisonOperator to specify how you want to perform +// the comparison. If the comparison evaluates to true, then the conditional +// operation succeeds. +// +// - Use Value to specify a value that DynamoDB will compare against an attribute. +// If the values match, then ExpectedAttributeValue evaluates to true and +// the conditional operation succeeds. Optionally, you can also set Exists +// to false, indicating that you do not expect to find the attribute value +// in the table. In this case, the conditional operation succeeds only if +// the comparison evaluates to false. +// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Note that if you use both sets of parameters at once, DynamoDB will return +// a ValidationException exception. +type ExpectedAttributeValue struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. 
+ // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes in the AttributeValueList. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // * EQ : Equal. EQ is supported for all data types, including lists and + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. + // + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). 
If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. 
AttributeValueList must contain two AttributeValue
+ // elements of the same type, either String, Number, or Binary (not a set
+ // type). A target attribute matches if the target value is greater than,
+ // or equal to, the first element and less than, or equal to, the second
+ // element. If an item contains an AttributeValue element of a different
+ // type than the one provided in the request, the value does not match. For
+ // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does
+ // not compare to {"NS":["6", "2", "1"]}.
+ ComparisonOperator *string `type:"string" enum:"ComparisonOperator"`
+
+ // Causes DynamoDB to evaluate the value before attempting a conditional operation:
+ //
+ // * If Exists is true, DynamoDB will check to see if that attribute value
+ // already exists in the table. If it is found, then the operation succeeds.
+ // If it is not found, the operation fails with a ConditionCheckFailedException.
+ //
+ // * If Exists is false, DynamoDB assumes that the attribute value does not
+ // exist in the table. If in fact the value does not exist, then the assumption
+ // is valid and the operation succeeds. If the value is found, despite the
+ // assumption that it does not exist, the operation fails with a ConditionCheckFailedException.
+ //
+ // The default setting for Exists is true. If you supply a Value all by itself,
+ // DynamoDB assumes the attribute exists: You don't have to set Exists to true,
+ // because it is implied.
+ //
+ // DynamoDB returns a ValidationException if:
+ //
+ // * Exists is true but there is no Value to check. (You expect a value to
+ // exist, but don't specify what that value is.)
+ //
+ // * Exists is false but you also provide a Value. (You cannot expect an
+ // attribute to have a value, while also expecting it not to exist.)
+ Exists *bool `type:"boolean"`
+
+ // Represents the data for the expected attribute.
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
+ // in the Amazon DynamoDB Developer Guide.
+ Value *AttributeValue `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExpectedAttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExpectedAttributeValue) GoString() string {
+ return s.String()
+}
+
+// SetAttributeValueList sets the AttributeValueList field's value.
+func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue {
+ s.AttributeValueList = v
+ return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue {
+ s.ComparisonOperator = &v
+ return s
+}
+
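+// ExpectedAttributeValue is supplied through the legacy Expected map on
+// PutItem, UpdateItem, and DeleteItem. An illustrative sketch, not generated
+// code; the client value svc, the table "Music", and its attribute names are
+// assumptions:
+//
+//  _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
+//      TableName: aws.String("Music"),
+//      Key: map[string]*dynamodb.AttributeValue{
+//          "Artist": {S: aws.String("Acme Band")},
+//      },
+//      Expected: map[string]*dynamodb.ExpectedAttributeValue{
+//          // Delete only if Rating is still 1 (Value alone implies Exists=true).
+//          "Rating": {Value: &dynamodb.AttributeValue{N: aws.String("1")}},
+//      },
+//  })
+
+// SetExists sets the Exists field's value.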
+func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue {
+ s.Exists = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue {
+ s.Value = v
+ return s
+}
+
+// There was a conflict when writing to the specified S3 bucket.
+type ExportConflictException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportConflictException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportConflictException) GoString() string {
+ return s.String()
+}
+
+func newErrorExportConflictException(v protocol.ResponseMetadata) error {
+ return &ExportConflictException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ExportConflictException) Code() string {
+ return "ExportConflictException"
+}
+
+// Message returns the exception's message.
+func (s *ExportConflictException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ExportConflictException) OrigErr() error {
+ return nil
+}
+
+func (s *ExportConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ExportConflictException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ExportConflictException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Represents the properties of the exported table.
+type ExportDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The billable size of the table export.
+ BilledSizeBytes *int64 `type:"long"`
+
+ // The client token that was provided for the export task. A client token makes
+ // calls to ExportTableToPointInTime idempotent, meaning that multiple identical
+ // calls have the same effect as one single call.
+ ClientToken *string `type:"string"`
+
+ // The time at which the export task completed.
+ EndTime *time.Time `type:"timestamp"`
+
+ // The Amazon Resource Name (ARN) of the table export.
+ ExportArn *string `min:"37" type:"string"`
+
+ // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON
+ // or ION.
+ ExportFormat *string `type:"string" enum:"ExportFormat"`
+
+ // The name of the manifest file for the export task.
+ ExportManifest *string `type:"string"`
+
+ // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or
+ // FAILED.
+ ExportStatus *string `type:"string" enum:"ExportStatus"`
+
+ // Point in time from which table data was exported.
+ ExportTime *time.Time `type:"timestamp"`
+
+ // Status code for the result of the failed export.
+ FailureCode *string `type:"string"`
+
+ // Export failure reason description.
+ FailureMessage *string `type:"string"` + + // The number of items exported. + ItemCount *int64 `type:"long"` + + // The name of the Amazon S3 bucket containing the export. + S3Bucket *string `type:"string"` + + // The ID of the Amazon Web Services account that owns the bucket containing + // the export. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix used as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data is stored. Valid + // values for S3SseAlgorithm are: + // + // * AES256 - server-side encryption with Amazon S3 managed keys + // + // * KMS - server-side encryption with KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` + + // The ID of the KMS managed key used to encrypt the S3 bucket where export + // data is stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The time at which the export task began. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table that was exported. + TableArn *string `type:"string"` + + // Unique ID of the table that was exported. + TableId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportDescription) GoString() string { + return s.String() +} + +// SetBilledSizeBytes sets the BilledSizeBytes field's value. +func (s *ExportDescription) SetBilledSizeBytes(v int64) *ExportDescription { + s.BilledSizeBytes = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportDescription) SetClientToken(v string) *ExportDescription { + s.ClientToken = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ExportDescription) SetEndTime(v time.Time) *ExportDescription { + s.EndTime = &v + return s +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportDescription) SetExportArn(v string) *ExportDescription { + s.ExportArn = &v + return s +} + +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportDescription) SetExportFormat(v string) *ExportDescription { + s.ExportFormat = &v + return s +} + +// SetExportManifest sets the ExportManifest field's value. +func (s *ExportDescription) SetExportManifest(v string) *ExportDescription { + s.ExportManifest = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportDescription) SetExportStatus(v string) *ExportDescription { + s.ExportStatus = &v + return s +} + +// SetExportTime sets the ExportTime field's value. +func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { + s.ExportTime = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. 
+func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { + s.FailureMessage = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { + s.ItemCount = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportDescription) SetS3Bucket(v string) *ExportDescription { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *ExportDescription) SetS3BucketOwner(v string) *ExportDescription { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportDescription) SetS3Prefix(v string) *ExportDescription { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportDescription) SetS3SseAlgorithm(v string) *ExportDescription { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportDescription) SetS3SseKmsKeyId(v string) *ExportDescription { + s.S3SseKmsKeyId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ExportDescription) SetStartTime(v time.Time) *ExportDescription { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportDescription) SetTableArn(v string) *ExportDescription { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *ExportDescription) SetTableId(v string) *ExportDescription { + s.TableId = &v + return s +} + +// The specified export was not found. +type ExportNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotFoundException) GoString() string { + return s.String() +} + +func newErrorExportNotFoundException(v protocol.ResponseMetadata) error { + return &ExportNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExportNotFoundException) Code() string { + return "ExportNotFoundException" +} + +// Message returns the exception's message. +func (s *ExportNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportNotFoundException) OrigErr() error { + return nil +} + +func (s *ExportNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *ExportNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Summary information about an export task.
+type ExportSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the export.
+ ExportArn *string `min:"37" type:"string"`
+
+ // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or
+ // FAILED.
+ ExportStatus *string `type:"string" enum:"ExportStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportSummary) GoString() string {
+ return s.String()
+}
+
+// SetExportArn sets the ExportArn field's value.
+func (s *ExportSummary) SetExportArn(v string) *ExportSummary {
+ s.ExportArn = &v
+ return s
+}
+
+// SetExportStatus sets the ExportStatus field's value.
+func (s *ExportSummary) SetExportStatus(v string) *ExportSummary {
+ s.ExportStatus = &v
+ return s
+}
+
+type ExportTableToPointInTimeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Providing a ClientToken makes the call to ExportTableToPointInTime idempotent,
+ // meaning that multiple identical calls have the same effect as one single
+ // call.
+ //
+ // A client token is valid for 8 hours after the first request that uses it
+ // is completed. After 8 hours, any request with the same client token is treated
+ // as a new request. Do not resubmit the same request with the same client token
+ // for more than 8 hours, or the result might not be idempotent.
+ //
+ // If you submit a request with the same client token but a change in other
+ // parameters within the 8-hour idempotency window, DynamoDB returns an ExportConflictException.
+ ClientToken *string `type:"string" idempotencyToken:"true"`
+
+ // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON
+ // or ION.
+ ExportFormat *string `type:"string" enum:"ExportFormat"`
+
+ // Time in the past from which to export table data, counted in seconds from
+ // the start of the Unix epoch. The table export will be a snapshot of the table's
+ // state at this point in time.
+ ExportTime *time.Time `type:"timestamp"`
+
+ // The name of the Amazon S3 bucket to export the snapshot to.
+ //
+ // S3Bucket is a required field
+ S3Bucket *string `type:"string" required:"true"`
+
+ // The ID of the Amazon Web Services account that owns the bucket the export
+ // will be stored in.
+ S3BucketOwner *string `type:"string"`
+
+ // The Amazon S3 bucket prefix to use as the file name and path of the exported
+ // snapshot.
+ S3Prefix *string `type:"string"`
+
+ // Type of encryption used on the bucket where export data will be stored. Valid
+ // values for S3SseAlgorithm are:
+ //
+ // * AES256 - server-side encryption with Amazon S3 managed keys
+ //
+ // * KMS - server-side encryption with KMS managed keys
+ S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"`
+
+ // The ID of the KMS managed key used to encrypt the S3 bucket where export
+ // data will be stored (if applicable).
+ S3SseKmsKeyId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) associated with the table to export. + // + // TableArn is a required field + TableArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTableToPointInTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportTableToPointInTimeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTableToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTableToPointInTimeInput"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + if s.S3SseKmsKeyId != nil && len(*s.S3SseKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3SseKmsKeyId", 1)) + } + if s.TableArn == nil { + invalidParams.Add(request.NewErrParamRequired("TableArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportTableToPointInTimeInput) SetClientToken(v string) *ExportTableToPointInTimeInput { + s.ClientToken = &v + return s +} + +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportTableToPointInTimeInput) SetExportFormat(v string) *ExportTableToPointInTimeInput { + s.ExportFormat = &v + return s +} + +// SetExportTime sets the ExportTime field's value. +func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableToPointInTimeInput { + s.ExportTime = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *ExportTableToPointInTimeInput) SetS3BucketOwner(v string) *ExportTableToPointInTimeInput { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTableToPointInTimeInput) SetS3Prefix(v string) *ExportTableToPointInTimeInput { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportTableToPointInTimeInput) SetS3SseAlgorithm(v string) *ExportTableToPointInTimeInput { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportTableToPointInTimeInput) SetS3SseKmsKeyId(v string) *ExportTableToPointInTimeInput { + s.S3SseKmsKeyId = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportTableToPointInTimeInput) SetTableArn(v string) *ExportTableToPointInTimeInput { + s.TableArn = &v + return s +} + +type ExportTableToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains a description of the table export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportTableToPointInTimeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExportTableToPointInTimeOutput) GoString() string {
+ return s.String()
+}
+
+// SetExportDescription sets the ExportDescription field's value.
+func (s *ExportTableToPointInTimeOutput) SetExportDescription(v *ExportDescription) *ExportTableToPointInTimeOutput {
+ s.ExportDescription = v
+ return s
+}
+
+// Represents a failure in a contributor insights operation.
+type FailureException struct {
+ _ struct{} `type:"structure"`
+
+ // Description of the failure.
+ ExceptionDescription *string `type:"string"`
+
+ // Exception name.
+ ExceptionName *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FailureException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s FailureException) GoString() string {
+ return s.String()
+}
+
+// SetExceptionDescription sets the ExceptionDescription field's value.
+func (s *FailureException) SetExceptionDescription(v string) *FailureException {
+ s.ExceptionDescription = &v
+ return s
+}
+
+// SetExceptionName sets the ExceptionName field's value.
+func (s *FailureException) SetExceptionName(v string) *FailureException {
+ s.ExceptionName = &v
+ return s
+}
+
+// Specifies an item and related attribute values to retrieve in a TransactGetItem
+// object.
+type Get struct {
+ _ struct{} `type:"structure"`
+
+ // One or more substitution tokens for attribute names in the ProjectionExpression
+ // parameter.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // A map of attribute names to AttributeValue objects that specifies the primary
+ // key of the item to retrieve.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // A string that identifies one or more attributes of the specified item to
+ // retrieve from the table. The attributes in the expression must be separated
+ // by commas. If no attribute names are specified, then all attributes of the
+ // specified item are returned. If any of the requested attributes are not found,
+ // they do not appear in the result.
+ ProjectionExpression *string `type:"string"`
+
+ // The name of the table from which to retrieve the specified item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
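+// A Get is wrapped in a TransactGetItem and passed to TransactGetItems. An
+// illustrative sketch, not generated code; the client value svc, the table
+// "Music", and its key attributes are assumptions:
+//
+//  out, err := svc.TransactGetItems(&dynamodb.TransactGetItemsInput{
+//      TransactItems: []*dynamodb.TransactGetItem{
+//          {Get: &dynamodb.Get{
+//              TableName: aws.String("Music"),
+//              Key: map[string]*dynamodb.AttributeValue{
+//                  "Artist":    {S: aws.String("Acme Band")},
+//                  "SongTitle": {S: aws.String("Happy Day")},
+//              },
+//          }},
+//      },
+//  })
+//  // out.Responses holds one ItemResponse per Get, in request order; err is
+//  // non-nil if the whole transaction was canceled.
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".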
+func (s Get) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Get) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Get) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Get"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Get) SetExpressionAttributeNames(v map[string]*string) *Get { + s.ExpressionAttributeNames = v + return s +} + +// SetKey sets the Key field's value. +func (s *Get) SetKey(v map[string]*AttributeValue) *Get { + s.Key = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *Get) SetProjectionExpression(v string) *Get { + s.ProjectionExpression = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Get) SetTableName(v string) *Get { + s.TableName = &v + return s +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. 
+ // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes are returned. If + // any of the requested attributes are not found, they do not appear in the + // result. + // + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The name of the table containing the requested item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetItemInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributesToGet sets the AttributesToGet field's value.
+func (s *GetItemInput) SetAttributesToGet(v []*string) *GetItemInput {
+ s.AttributesToGet = v
+ return s
+}
+
+// SetConsistentRead sets the ConsistentRead field's value.
+func (s *GetItemInput) SetConsistentRead(v bool) *GetItemInput {
+ s.ConsistentRead = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *GetItemInput) SetExpressionAttributeNames(v map[string]*string) *GetItemInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetItemInput) SetKey(v map[string]*AttributeValue) *GetItemInput {
+ s.Key = v
+ return s
+}
+
+// SetProjectionExpression sets the ProjectionExpression field's value.
+func (s *GetItemInput) SetProjectionExpression(v string) *GetItemInput {
+ s.ProjectionExpression = &v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *GetItemInput) SetReturnConsumedCapacity(v string) *GetItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *GetItemInput) SetTableName(v string) *GetItemInput {
+ s.TableName = &v
+ return s
+}
+
+// Represents the output of a GetItem operation.
+type GetItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the GetItem operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression.
+ Item map[string]*AttributeValue `type:"map"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetItemOutput) GoString() string {
+ return s.String()
+}
+
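+// GetItemInput and GetItemOutput in practice. An illustrative sketch, not
+// generated code; the client value svc, the table "Music", and its key
+// attributes are assumptions:
+//
+//  out, err := svc.GetItem(&dynamodb.GetItemInput{
+//      TableName: aws.String("Music"),
+//      Key: map[string]*dynamodb.AttributeValue{
+//          "Artist":    {S: aws.String("Acme Band")},
+//          "SongTitle": {S: aws.String("Happy Day")},
+//      },
+//      ConsistentRead: aws.Bool(true), // strongly consistent read
+//  })
+//  if err != nil {
+//      return err
+//  }
+//  if out.Item == nil {
+//      // no item with this key exists
+//  }
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.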
+func (s *GetItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *GetItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItem sets the Item field's value. +func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput { + s.Item = v + return s +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GlobalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndex) SetIndexName(v string) *GlobalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *GlobalSecondaryIndex) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndex { + s.ProvisionedThroughput = v + return s +} + +// Represents the auto scaling settings of a global secondary index for a global +// table that will be modified. +type GlobalSecondaryIndexAutoScalingUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexAutoScalingUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexAutoScalingUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GlobalSecondaryIndexAutoScalingUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexAutoScalingUpdate"} + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedWriteCapacityAutoScalingUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *GlobalSecondaryIndexAutoScalingUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value. +func (s *GlobalSecondaryIndexAutoScalingUpdate) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *GlobalSecondaryIndexAutoScalingUpdate { + s.ProvisionedWriteCapacityAutoScalingUpdate = v + return s +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether the index is currently backfilling. Backfilling is the + // process of reading items from the table and determining whether they can + // be added to the index. (Not all items will qualify: For example, a partition + // key cannot have any duplicate values.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // You can delete an index that is being created during the Backfilling phase + // when IndexStatus is set to CREATING and Backfilling is true. You can't delete + // the index that is being created when IndexStatus is set to CREATING and Backfilling + // is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The current state of the global secondary index: + // + // * CREATING - The index is being created. + // + // * UPDATING - The index is being updated. + // + // * DELETING - The index is being deleted. + // + // * ACTIVE - The index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// SetBackfilling sets the Backfilling field's value. +func (s *GlobalSecondaryIndexDescription) SetBackfilling(v bool) *GlobalSecondaryIndexDescription { + s.Backfilling = &v + return s +} + +// SetIndexArn sets the IndexArn field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexArn(v string) *GlobalSecondaryIndexDescription { + s.IndexArn = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexName(v string) *GlobalSecondaryIndexDescription { + s.IndexName = &v + return s +} + +// SetIndexSizeBytes sets the IndexSizeBytes field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *GlobalSecondaryIndexDescription { + s.IndexSizeBytes = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *GlobalSecondaryIndexDescription) SetIndexStatus(v string) *GlobalSecondaryIndexDescription { + s.IndexStatus = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *GlobalSecondaryIndexDescription) SetItemCount(v int64) *GlobalSecondaryIndexDescription { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexDescription { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. 
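GlobalSecondaryIndexDescription is the read-side counterpart returned per index by DescribeTable. A hedged sketch of inspecting Backfilling and IndexStatus from that output (the table name is hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String("Music"), // hypothetical table
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.Table.GlobalSecondaryIndexes {
		// While IndexStatus is CREATING and Backfilling is true, the
		// index can still be deleted, per the field comments above.
		fmt.Printf("%s status=%s backfilling=%t items=%d\n",
			aws.StringValue(d.IndexName),
			aws.StringValue(d.IndexStatus),
			aws.BoolValue(d.Backfilling),
			aws.Int64Value(d.ItemCount))
	}
}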
+func (s *GlobalSecondaryIndexDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *GlobalSecondaryIndexDescription { + s.ProvisionedThroughput = v + return s +} + +// Represents the properties of a global secondary index for the table when +// the backup was created. +type GlobalSecondaryIndexInfo struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The complete key schema for a global secondary index, which consists of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexInfo) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndexInfo) SetIndexName(v string) *GlobalSecondaryIndexInfo { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexInfo { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndexInfo { + s.ProvisionedThroughput = v + return s +} + +// Represents one of the following: +// +// - A new global secondary index to be added to an existing table. +// +// - New provisioned throughput parameters for an existing global secondary +// index. +// +// - An existing global secondary index to be removed from an existing table. 
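A sketch of the one-of usage the next type models, adding an index through UpdateTable's Create action; exactly one of Create, Update, or Delete is set per element, and the table, index, and attribute names here are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Music"),
		// Key attributes referenced by the new index must be declared here.
		AttributeDefinitions: []*dynamodb.AttributeDefinition{{
			AttributeName: aws.String("Album"),
			AttributeType: aws.String(dynamodb.ScalarAttributeTypeS),
		}},
		GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{{
			Create: &dynamodb.CreateGlobalSecondaryIndexAction{
				IndexName: aws.String("ByAlbum"),
				KeySchema: []*dynamodb.KeySchemaElement{{
					AttributeName: aws.String("Album"),
					KeyType:       aws.String(dynamodb.KeyTypeHash),
				}},
				Projection: &dynamodb.Projection{
					ProjectionType: aws.String(dynamodb.ProjectionTypeAll),
				},
				ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
					ReadCapacityUnits:  aws.Int64(5),
					WriteCapacityUnits: aws.Int64(5),
				},
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}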
+type GlobalSecondaryIndexUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a global secondary index on an existing + // table: + // + // * IndexName + // + // * KeySchema + // + // * AttributeDefinitions + // + // * Projection + // + // * ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalSecondaryIndexUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlobalSecondaryIndexUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *GlobalSecondaryIndexUpdate) SetCreate(v *CreateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *GlobalSecondaryIndexUpdate) SetDelete(v *DeleteGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Delete = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *GlobalSecondaryIndexUpdate) SetUpdate(v *UpdateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Update = v + return s +} + +// Represents the properties of a global table. +type GlobalTable struct { + _ struct{} `type:"structure"` + + // The global table name. + GlobalTableName *string `min:"3" type:"string"` + + // The Regions where the global table has replicas. + ReplicationGroup []*Replica `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTable) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *GlobalTable) SetGlobalTableName(v string) *GlobalTable { + s.GlobalTableName = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable { + s.ReplicationGroup = v + return s +} + +// The specified global table already exists. +type GlobalTableAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorGlobalTableAlreadyExistsException(v protocol.ResponseMetadata) error { + return &GlobalTableAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *GlobalTableAlreadyExistsException) Code() string { + return "GlobalTableAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *GlobalTableAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *GlobalTableAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *GlobalTableAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *GlobalTableAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *GlobalTableAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Contains details about the global table. +type GlobalTableDescription struct { + _ struct{} `type:"structure"` + + // The creation time of the global table. + CreationDateTime *time.Time `type:"timestamp"` + + // The unique identifier of the global table. + GlobalTableArn *string `type:"string"` + + // The global table name. + GlobalTableName *string `min:"3" type:"string"` + + // The current state of the global table: + // + // * CREATING - The global table is being created. + // + // * UPDATING - The global table is being updated. + // + // * DELETING - The global table is being deleted. + // + // * ACTIVE - The global table is ready for use. + GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"` + + // The Regions where the global table has replicas. + ReplicationGroup []*ReplicaDescription `type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableDescription) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *GlobalTableDescription) SetCreationDateTime(v time.Time) *GlobalTableDescription { + s.CreationDateTime = &v + return s +} + +// SetGlobalTableArn sets the GlobalTableArn field's value. +func (s *GlobalTableDescription) SetGlobalTableArn(v string) *GlobalTableDescription { + s.GlobalTableArn = &v + return s +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *GlobalTableDescription) SetGlobalTableName(v string) *GlobalTableDescription { + s.GlobalTableName = &v + return s +} + +// SetGlobalTableStatus sets the GlobalTableStatus field's value. +func (s *GlobalTableDescription) SetGlobalTableStatus(v string) *GlobalTableDescription { + s.GlobalTableStatus = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *GlobalTableDescription) SetReplicationGroup(v []*ReplicaDescription) *GlobalTableDescription { + s.ReplicationGroup = v + return s +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Auto scaling settings for managing a global secondary index's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
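A hedged sketch of how this settings-update type is typically sent via UpdateGlobalTableSettings; it assumes the input's list member carries the same name as the type, as in the v1 SDK, and the table and index names are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Raise write throughput on one index of a global table.
	_, err := svc.UpdateGlobalTableSettings(&dynamodb.UpdateGlobalTableSettingsInput{
		GlobalTableName: aws.String("Music"),
		GlobalTableGlobalSecondaryIndexSettingsUpdate: []*dynamodb.GlobalTableGlobalSecondaryIndexSettingsUpdate{{
			IndexName:                     aws.String("ByAlbum"),
			ProvisionedWriteCapacityUnits: aws.Int64(10),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}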
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1)) + } + if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettingsUpdate sets the ProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedWriteCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityUnits(v int64) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedWriteCapacityUnits = &v + return s +} + +// The specified global table does not exist. +type GlobalTableNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlobalTableNotFoundException) GoString() string { + return s.String() +} + +func newErrorGlobalTableNotFoundException(v protocol.ResponseMetadata) error { + return &GlobalTableNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *GlobalTableNotFoundException) Code() string { + return "GlobalTableNotFoundException" +} + +// Message returns the exception's message. +func (s *GlobalTableNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *GlobalTableNotFoundException) OrigErr() error { + return nil +} + +func (s *GlobalTableNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
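Because these modeled exceptions implement awserr.Error (Code, Message, OrigErr) plus the RequestFailure accessors below, callers can branch on the generated error-code constants. An illustrative sketch with a hypothetical global table name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.DescribeGlobalTable(&dynamodb.DescribeGlobalTableInput{
		GlobalTableName: aws.String("Music"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case dynamodb.ErrCodeGlobalTableNotFoundException:
			fmt.Println("no such global table:", aerr.Message())
		default:
			log.Fatal(aerr)
		}
	}
}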
+func (s *GlobalTableNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *GlobalTableNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. +type IdempotentParameterMismatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IdempotentParameterMismatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IdempotentParameterMismatchException) GoString() string { + return s.String() +} + +func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { + return &IdempotentParameterMismatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IdempotentParameterMismatchException) Code() string { + return "IdempotentParameterMismatchException" +} + +// Message returns the exception's message. +func (s *IdempotentParameterMismatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IdempotentParameterMismatchException) OrigErr() error { + return nil +} + +func (s *IdempotentParameterMismatchException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID +} + +// There was a conflict when importing from the specified S3 source. This can +// occur when the current import conflicts with a previous import request that +// had the same client token. +type ImportConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
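A sketch of how the idempotency contract surfaces to callers: re-sending a different payload under the same ClientRequestToken should produce the mismatch error above. The errors.As match rests on the assumption that the SDK returns these modeled exception types directly; the table, item, and token are hypothetical:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	input := &dynamodb.TransactWriteItemsInput{
		ClientRequestToken: aws.String("example-token-1"), // hypothetical token
		TransactItems: []*dynamodb.TransactWriteItem{{
			Put: &dynamodb.Put{
				TableName: aws.String("Music"),
				Item: map[string]*dynamodb.AttributeValue{
					"Artist": {S: aws.String("No One You Know")},
				},
			},
		}},
	}
	// Re-sending the same token with a *different* payload yields the error.
	if _, err := svc.TransactWriteItems(input); err != nil {
		var mismatch *dynamodb.IdempotentParameterMismatchException
		if errors.As(err, &mismatch) {
			fmt.Println("token reused with different parameters:", mismatch.Message())
		} else {
			log.Fatal(err)
		}
	}
}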
+func (s ImportConflictException) GoString() string {
+	return s.String()
+}
+
+func newErrorImportConflictException(v protocol.ResponseMetadata) error {
+	return &ImportConflictException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ImportConflictException) Code() string {
+	return "ImportConflictException"
+}
+
+// Message returns the exception's message.
+func (s *ImportConflictException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ImportConflictException) OrigErr() error {
+	return nil
+}
+
+func (s *ImportConflictException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ImportConflictException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ImportConflictException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The specified import was not found.
+type ImportNotFoundException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorImportNotFoundException(v protocol.ResponseMetadata) error {
+	return &ImportNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ImportNotFoundException) Code() string {
+	return "ImportNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ImportNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ImportNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ImportNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ImportNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ImportNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Summary information about the source file for the import.
+type ImportSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the CloudWatch Log Group associated with
+	// this import task.
+	CloudWatchLogGroupArn *string `min:"1" type:"string"`
+
+	// The time at which this import task ended.
+	EndTime *time.Time `type:"timestamp"`
+
+	// The Amazon Resource Name (ARN) corresponding to the import request.
+	ImportArn *string `min:"37" type:"string"`
+
+	// The status of the import operation.
+	ImportStatus *string `type:"string" enum:"ImportStatus"`
+
+	// The format of the source data. Valid values are CSV, DYNAMODB_JSON or ION.
+	InputFormat *string `type:"string" enum:"InputFormat"`
+
+	// The path and S3 bucket of the source file that is being imported. This includes
+	// the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner (optional
+	// if the bucket is owned by the requester).
+	S3BucketSource *S3BucketSource `type:"structure"`
+
+	// The time at which this import task began.
+	StartTime *time.Time `type:"timestamp"`
+
+	// The Amazon Resource Name (ARN) of the table being imported into.
+	TableArn *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportSummary) GoString() string {
+	return s.String()
+}
+
+// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value.
+func (s *ImportSummary) SetCloudWatchLogGroupArn(v string) *ImportSummary {
+	s.CloudWatchLogGroupArn = &v
+	return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *ImportSummary) SetEndTime(v time.Time) *ImportSummary {
+	s.EndTime = &v
+	return s
+}
+
+// SetImportArn sets the ImportArn field's value.
+func (s *ImportSummary) SetImportArn(v string) *ImportSummary {
+	s.ImportArn = &v
+	return s
+}
+
+// SetImportStatus sets the ImportStatus field's value.
+func (s *ImportSummary) SetImportStatus(v string) *ImportSummary {
+	s.ImportStatus = &v
+	return s
+}
+
+// SetInputFormat sets the InputFormat field's value.
+func (s *ImportSummary) SetInputFormat(v string) *ImportSummary {
+	s.InputFormat = &v
+	return s
+}
+
+// SetS3BucketSource sets the S3BucketSource field's value.
+func (s *ImportSummary) SetS3BucketSource(v *S3BucketSource) *ImportSummary {
+	s.S3BucketSource = v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *ImportSummary) SetStartTime(v time.Time) *ImportSummary {
+	s.StartTime = &v
+	return s
+}
+
+// SetTableArn sets the TableArn field's value.
+func (s *ImportSummary) SetTableArn(v string) *ImportSummary {
+	s.TableArn = &v
+	return s
+}
+
+// Represents the properties of the table being imported into.
+type ImportTableDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The client token that was provided for the import task. Reusing the client
+	// token on retry makes a call to ImportTable idempotent.
+	ClientToken *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the CloudWatch Log Group associated with
+	// the target table.
+	CloudWatchLogGroupArn *string `min:"1" type:"string"`
+
+	// The time at which the creation of the table associated with this import task
+	// completed.
+	EndTime *time.Time `type:"timestamp"`
+
+	// The number of errors that occurred while importing the source file into
+	// the target table.
+	ErrorCount *int64 `type:"long"`
+
+	// The error code corresponding to the failure that the import job ran into
+	// during execution.
+	FailureCode *string `type:"string"`
+
+	// The error message corresponding to the failure that the import job ran into
+	// during execution.
+	FailureMessage *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) corresponding to the import request.
+	ImportArn *string `min:"37" type:"string"`
+
+	// The status of the import.
+	ImportStatus *string `type:"string" enum:"ImportStatus"`
+
+	// The number of items successfully imported into the new table.
+	ImportedItemCount *int64 `type:"long"`
+
+	// The compression options for the data that has been imported into the target
+	// table. The values are NONE, GZIP, or ZSTD.
+	InputCompressionType *string `type:"string" enum:"InputCompressionType"`
+
+	// The format of the source data going into the target table.
+	InputFormat *string `type:"string" enum:"InputFormat"`
+
+	// The format options for the data that was imported into the target table.
+	// There is one value, CsvOption.
+	InputFormatOptions *InputFormatOptions `type:"structure"`
+
+	// The total number of items processed from the source file.
+	ProcessedItemCount *int64 `type:"long"`
+
+	// The total size of data processed from the source file, in bytes.
+	ProcessedSizeBytes *int64 `type:"long"`
+
+	// Values for the S3 bucket the source file is imported from. Includes bucket
+	// name (required), key prefix (optional) and bucket account owner ID (optional).
+	S3BucketSource *S3BucketSource `type:"structure"`
+
+	// The time when this import task started.
+	StartTime *time.Time `type:"timestamp"`
+
+	// The Amazon Resource Name (ARN) of the table being imported into.
+	TableArn *string `type:"string"`
+
+	// The parameters for the new table that is being imported into.
+	TableCreationParameters *TableCreationParameters `type:"structure"`
+
+	// The table ID corresponding to the table created by the import table process.
+	TableId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportTableDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportTableDescription) GoString() string {
+	return s.String()
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ImportTableDescription) SetClientToken(v string) *ImportTableDescription {
+	s.ClientToken = &v
+	return s
+}
+
+// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value.
+func (s *ImportTableDescription) SetCloudWatchLogGroupArn(v string) *ImportTableDescription {
+	s.CloudWatchLogGroupArn = &v
+	return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *ImportTableDescription) SetEndTime(v time.Time) *ImportTableDescription {
+	s.EndTime = &v
+	return s
+}
+
+// SetErrorCount sets the ErrorCount field's value.
+func (s *ImportTableDescription) SetErrorCount(v int64) *ImportTableDescription {
+	s.ErrorCount = &v
+	return s
+}
+
+// SetFailureCode sets the FailureCode field's value.
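These description fields are also what DescribeImport returns while a job runs; a hedged polling sketch follows (the import ARN is a made-up placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeImport(&dynamodb.DescribeImportInput{
		// Hypothetical ARN, for illustration only.
		ImportArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/import/01234567890123-a1b2c3d4"),
	})
	if err != nil {
		log.Fatal(err)
	}
	d := out.ImportTableDescription
	fmt.Printf("status=%s processed=%d imported=%d errors=%d\n",
		aws.StringValue(d.ImportStatus),
		aws.Int64Value(d.ProcessedItemCount),
		aws.Int64Value(d.ImportedItemCount),
		aws.Int64Value(d.ErrorCount))
	if aws.StringValue(d.ImportStatus) == dynamodb.ImportStatusFailed {
		fmt.Println(aws.StringValue(d.FailureCode), aws.StringValue(d.FailureMessage))
	}
}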
+func (s *ImportTableDescription) SetFailureCode(v string) *ImportTableDescription {
+	s.FailureCode = &v
+	return s
+}
+
+// SetFailureMessage sets the FailureMessage field's value.
+func (s *ImportTableDescription) SetFailureMessage(v string) *ImportTableDescription {
+	s.FailureMessage = &v
+	return s
+}
+
+// SetImportArn sets the ImportArn field's value.
+func (s *ImportTableDescription) SetImportArn(v string) *ImportTableDescription {
+	s.ImportArn = &v
+	return s
+}
+
+// SetImportStatus sets the ImportStatus field's value.
+func (s *ImportTableDescription) SetImportStatus(v string) *ImportTableDescription {
+	s.ImportStatus = &v
+	return s
+}
+
+// SetImportedItemCount sets the ImportedItemCount field's value.
+func (s *ImportTableDescription) SetImportedItemCount(v int64) *ImportTableDescription {
+	s.ImportedItemCount = &v
+	return s
+}
+
+// SetInputCompressionType sets the InputCompressionType field's value.
+func (s *ImportTableDescription) SetInputCompressionType(v string) *ImportTableDescription {
+	s.InputCompressionType = &v
+	return s
+}
+
+// SetInputFormat sets the InputFormat field's value.
+func (s *ImportTableDescription) SetInputFormat(v string) *ImportTableDescription {
+	s.InputFormat = &v
+	return s
+}
+
+// SetInputFormatOptions sets the InputFormatOptions field's value.
+func (s *ImportTableDescription) SetInputFormatOptions(v *InputFormatOptions) *ImportTableDescription {
+	s.InputFormatOptions = v
+	return s
+}
+
+// SetProcessedItemCount sets the ProcessedItemCount field's value.
+func (s *ImportTableDescription) SetProcessedItemCount(v int64) *ImportTableDescription {
+	s.ProcessedItemCount = &v
+	return s
+}
+
+// SetProcessedSizeBytes sets the ProcessedSizeBytes field's value.
+func (s *ImportTableDescription) SetProcessedSizeBytes(v int64) *ImportTableDescription {
+	s.ProcessedSizeBytes = &v
+	return s
+}
+
+// SetS3BucketSource sets the S3BucketSource field's value.
+func (s *ImportTableDescription) SetS3BucketSource(v *S3BucketSource) *ImportTableDescription {
+	s.S3BucketSource = v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *ImportTableDescription) SetStartTime(v time.Time) *ImportTableDescription {
+	s.StartTime = &v
+	return s
+}
+
+// SetTableArn sets the TableArn field's value.
+func (s *ImportTableDescription) SetTableArn(v string) *ImportTableDescription {
+	s.TableArn = &v
+	return s
+}
+
+// SetTableCreationParameters sets the TableCreationParameters field's value.
+func (s *ImportTableDescription) SetTableCreationParameters(v *TableCreationParameters) *ImportTableDescription {
+	s.TableCreationParameters = v
+	return s
+}
+
+// SetTableId sets the TableId field's value.
+func (s *ImportTableDescription) SetTableId(v string) *ImportTableDescription {
+	s.TableId = &v
+	return s
+}
+
+type ImportTableInput struct {
+	_ struct{} `type:"structure"`
+
+	// Providing a ClientToken makes the call to ImportTable idempotent, meaning
+	// that multiple identical calls have the same effect as one single call.
+	//
+	// A client token is valid for 8 hours after the first request that uses it
+	// is completed. After 8 hours, any request with the same client token is treated
+	// as a new request. Do not resubmit the same request with the same client token
+	// for more than 8 hours, or the result might not be idempotent.
+	//
+	// If you submit a request with the same client token but a change in other
+	// parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch
+	// exception.
+	ClientToken *string `type:"string" idempotencyToken:"true"`
+
+	// Type of compression to be used on the input coming from the imported table.
+	InputCompressionType *string `type:"string" enum:"InputCompressionType"`
+
+	// The format of the source data. Valid values for InputFormat are CSV, DYNAMODB_JSON
+	// or ION.
+	//
+	// InputFormat is a required field
+	InputFormat *string `type:"string" required:"true" enum:"InputFormat"`
+
+	// Additional properties that specify how the input is formatted.
+	InputFormatOptions *InputFormatOptions `type:"structure"`
+
+	// The S3 bucket that provides the source for the import.
+	//
+	// S3BucketSource is a required field
+	S3BucketSource *S3BucketSource `type:"structure" required:"true"`
+
+	// Parameters for the table to import the data into.
+	//
+	// TableCreationParameters is a required field
+	TableCreationParameters *TableCreationParameters `type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportTableInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ImportTableInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ImportTableInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ImportTableInput"}
+	if s.InputFormat == nil {
+		invalidParams.Add(request.NewErrParamRequired("InputFormat"))
+	}
+	if s.S3BucketSource == nil {
+		invalidParams.Add(request.NewErrParamRequired("S3BucketSource"))
+	}
+	if s.TableCreationParameters == nil {
+		invalidParams.Add(request.NewErrParamRequired("TableCreationParameters"))
+	}
+	if s.InputFormatOptions != nil {
+		if err := s.InputFormatOptions.Validate(); err != nil {
+			invalidParams.AddNested("InputFormatOptions", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.S3BucketSource != nil {
+		if err := s.S3BucketSource.Validate(); err != nil {
+			invalidParams.AddNested("S3BucketSource", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.TableCreationParameters != nil {
+		if err := s.TableCreationParameters.Validate(); err != nil {
+			invalidParams.AddNested("TableCreationParameters", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *ImportTableInput) SetClientToken(v string) *ImportTableInput {
+	s.ClientToken = &v
+	return s
+}
+
+// SetInputCompressionType sets the InputCompressionType field's value.
+func (s *ImportTableInput) SetInputCompressionType(v string) *ImportTableInput {
+	s.InputCompressionType = &v
+	return s
+}
+
+// SetInputFormat sets the InputFormat field's value.
+func (s *ImportTableInput) SetInputFormat(v string) *ImportTableInput {
+	s.InputFormat = &v
+	return s
+}
+
+// SetInputFormatOptions sets the InputFormatOptions field's value.
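Putting the input together end to end, with Validate run client-side before the call; this is an illustrative sketch, and the bucket, prefix, and table names are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	input := &dynamodb.ImportTableInput{
		InputFormat:          aws.String(dynamodb.InputFormatDynamodbJson),
		InputCompressionType: aws.String(dynamodb.InputCompressionTypeGzip),
		S3BucketSource: &dynamodb.S3BucketSource{
			S3Bucket:    aws.String("my-export-bucket"),
			S3KeyPrefix: aws.String("exports/music/"),
		},
		TableCreationParameters: &dynamodb.TableCreationParameters{
			TableName:   aws.String("MusicImported"),
			BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
			AttributeDefinitions: []*dynamodb.AttributeDefinition{{
				AttributeName: aws.String("Artist"),
				AttributeType: aws.String(dynamodb.ScalarAttributeTypeS),
			}},
			KeySchema: []*dynamodb.KeySchemaElement{{
				AttributeName: aws.String("Artist"),
				KeyType:       aws.String(dynamodb.KeyTypeHash),
			}},
		},
	}
	// Validate catches the missing required members listed above before any call.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.ImportTable(input)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(out.ImportTableDescription.ImportArn))
}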
+func (s *ImportTableInput) SetInputFormatOptions(v *InputFormatOptions) *ImportTableInput { + s.InputFormatOptions = v + return s +} + +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportTableInput) SetS3BucketSource(v *S3BucketSource) *ImportTableInput { + s.S3BucketSource = v + return s +} + +// SetTableCreationParameters sets the TableCreationParameters field's value. +func (s *ImportTableInput) SetTableCreationParameters(v *TableCreationParameters) *ImportTableInput { + s.TableCreationParameters = v + return s +} + +type ImportTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items + // were processed, and how many errors were encountered. + // + // ImportTableDescription is a required field + ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableOutput) GoString() string { + return s.String() +} + +// SetImportTableDescription sets the ImportTableDescription field's value. +func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) *ImportTableOutput { + s.ImportTableDescription = v + return s +} + +// The operation tried to access a nonexistent index. +type IndexNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexNotFoundException) GoString() string { + return s.String() +} + +func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error { + return &IndexNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IndexNotFoundException) Code() string { + return "IndexNotFoundException" +} + +// Message returns the exception's message. +func (s *IndexNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IndexNotFoundException) OrigErr() error { + return nil +} + +func (s *IndexNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *IndexNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IndexNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The format options for the data that was imported into the target table. +// There is one value, CsvOption. +type InputFormatOptions struct { + _ struct{} `type:"structure"` + + // The options for imported source files in CSV format. The values are Delimiter + // and HeaderList. + Csv *CsvOptions `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputFormatOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputFormatOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputFormatOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputFormatOptions"} + if s.Csv != nil { + if err := s.Csv.Validate(); err != nil { + invalidParams.AddNested("Csv", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCsv sets the Csv field's value. +func (s *InputFormatOptions) SetCsv(v *CsvOptions) *InputFormatOptions { + s.Csv = v + return s +} + +// An error occurred on the server side. +type InternalServerError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The server encountered an internal error trying to fulfill the request. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerError) GoString() string { + return s.String() +} + +func newErrorInternalServerError(v protocol.ResponseMetadata) error { + return &InternalServerError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerError) Code() string { + return "InternalServerError" +} + +// Message returns the exception's message. +func (s *InternalServerError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerError) OrigErr() error { + return nil +} + +func (s *InternalServerError) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
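A small sketch of the CSV variant of InputFormatOptions: Validate defers to the nested CsvOptions exactly as the method above shows; the delimiter and header names are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Semicolon-delimited CSV with an explicit header row.
	opts := &dynamodb.InputFormatOptions{
		Csv: &dynamodb.CsvOptions{
			Delimiter:  aws.String(";"),
			HeaderList: aws.StringSlice([]string{"Artist", "SongTitle", "Year"}),
		},
	}
	// Any violation is reported as a nested "Csv" parameter error.
	if err := opts.Validate(); err != nil {
		fmt.Println("bad CSV options:", err)
	}
}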
+func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified ExportTime is outside of the point in time recovery window. +type InvalidExportTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidExportTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidExportTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { + return &InvalidExportTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExportTimeException) Code() string { + return "InvalidExportTimeException" +} + +// Message returns the exception's message. +func (s *InvalidExportTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidExportTimeException) OrigErr() error { + return nil +} + +func (s *InvalidExportTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidExportTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidExportTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime +// and LatestRestorableDateTime. +type InvalidRestoreTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRestoreTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRestoreTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error { + return &InvalidRestoreTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidRestoreTimeException) Code() string { + return "InvalidRestoreTimeException" +} + +// Message returns the exception's message. +func (s *InvalidRestoreTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRestoreTimeException) OrigErr() error { + return nil +} + +func (s *InvalidRestoreTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRestoreTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRestoreTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. If the +// table does not have any local secondary indexes, this information is not +// returned in the response. +type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The partition key value of the item collection. This value is the same as + // the partition key value of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// SetItemCollectionKey sets the ItemCollectionKey field's value. +func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics { + s.ItemCollectionKey = v + return s +} + +// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value. +func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics { + s.SizeEstimateRangeGB = v + return s +} + +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +type ItemCollectionSizeLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The total size of an item collection has exceeded the maximum limit of 10 + // gigabytes. 
+ Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionSizeLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionSizeLimitExceededException) GoString() string { + return s.String() +} + +func newErrorItemCollectionSizeLimitExceededException(v protocol.ResponseMetadata) error { + return &ItemCollectionSizeLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ItemCollectionSizeLimitExceededException) Code() string { + return "ItemCollectionSizeLimitExceededException" +} + +// Message returns the exception's message. +func (s *ItemCollectionSizeLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ItemCollectionSizeLimitExceededException) OrigErr() error { + return nil +} + +func (s *ItemCollectionSizeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ItemCollectionSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ItemCollectionSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Details for the requested item. +type ItemResponse struct { + _ struct{} `type:"structure"` + + // Map of attribute data consisting of the data type and attribute value. + Item map[string]*AttributeValue `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemResponse) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. +func (s *ItemResponse) SetItem(v map[string]*AttributeValue) *ItemResponse { + s.Item = v + return s +} + +// Represents a single element of a key schema. A key schema specifies the attributes +// that make up the primary key of a table, or the key attributes of an index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. For +// example, a simple primary key would be represented by one KeySchemaElement +// (for the partition key). A composite primary key would require one KeySchemaElement +// for the partition key, and another KeySchemaElement for the sort key. +// +// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). 
+// The data type must be one of String, Number, or Binary. The attribute cannot +// be nested within a List or a Map. +type KeySchemaElement struct { + _ struct{} `type:"structure"` + + // The name of a key attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // The role that this key attribute will assume: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // KeyType is a required field + KeyType *string `type:"string" required:"true" enum:"KeyType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeySchemaElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeySchemaElement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KeySchemaElement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.KeyType == nil { + invalidParams.Add(request.NewErrParamRequired("KeyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *KeySchemaElement) SetAttributeName(v string) *KeySchemaElement { + s.AttributeName = &v + return s +} + +// SetKeyType sets the KeyType field's value. +func (s *KeySchemaElement) SetKeyType(v string) *KeySchemaElement { + s.KeyType = &v + return s +} + +// Represents a set of primary keys and, for each key, the attributes to retrieve +// from the table. +// +// For each primary key, you must provide all of the key attributes. For example, +// with a simple primary key, you only need to provide the partition key. For +// a composite primary key, you must provide both the partition key and the +// sort key. +type KeysAndAttributes struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. 
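+	//
+	// Illustrative sketch (not part of the generated code): a strongly consistent
+	// KeysAndAttributes entry for a table with a simple primary key, suitable for
+	// BatchGetItem; the table and attribute names are hypothetical.
+	//
+	//	keys := (&dynamodb.KeysAndAttributes{}).
+	//		SetKeys([]map[string]*dynamodb.AttributeValue{
+	//			{"Id": (&dynamodb.AttributeValue{}).SetS("user-123")},
+	//		}).
+	//		SetConsistentRead(true).
+	//		SetProjectionExpression("Id, Score")
+	//	input := &dynamodb.BatchGetItemInput{
+	//		RequestItems: map[string]*dynamodb.KeysAndAttributes{"MyTable": keys},
+	//	}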
+ ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // The primary key attribute values that define the items and the attributes + // associated with the items. + // + // Keys is a required field + Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeysAndAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeysAndAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *KeysAndAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Keys == nil { + invalidParams.Add(request.NewErrParamRequired("Keys")) + } + if s.Keys != nil && len(s.Keys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Keys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *KeysAndAttributes) SetAttributesToGet(v []*string) *KeysAndAttributes { + s.AttributesToGet = v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *KeysAndAttributes) SetConsistentRead(v bool) *KeysAndAttributes { + s.ConsistentRead = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *KeysAndAttributes) SetExpressionAttributeNames(v map[string]*string) *KeysAndAttributes { + s.ExpressionAttributeNames = v + return s +} + +// SetKeys sets the Keys field's value. +func (s *KeysAndAttributes) SetKeys(v []map[string]*AttributeValue) *KeysAndAttributes { + s.Keys = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes { + s.ProjectionExpression = &v + return s +} + +// Describes a Kinesis data stream destination. +type KinesisDataStreamDestination struct { + _ struct{} `type:"structure"` + + // The current status of replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The human-readable string that corresponds to the replica status. + DestinationStatusDescription *string `type:"string"` + + // The ARN for a specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KinesisDataStreamDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KinesisDataStreamDestination) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *KinesisDataStreamDestination) SetDestinationStatus(v string) *KinesisDataStreamDestination { + s.DestinationStatus = &v + return s +} + +// SetDestinationStatusDescription sets the DestinationStatusDescription field's value. +func (s *KinesisDataStreamDestination) SetDestinationStatusDescription(v string) *KinesisDataStreamDestination { + s.DestinationStatusDescription = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStreamDestination { + s.StreamArn = &v + return s +} + +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. 
These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
+// RestoreTableFromBackup, and RestoreTableToPointInTime.
+//
+// When you are creating a table with one or more secondary indexes, you can
+// have up to 250 such requests running at a time. However, if the table or
+// index specifications are complex, then DynamoDB might temporarily reduce
+// the number of concurrent operations.
+//
+// When importing into DynamoDB, up to 50 simultaneous import table operations
+// are allowed per account.
+//
+// There is a soft account quota of 2,500 tables.
+type LimitExceededException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// Too many operations for a given subscriber.
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LimitExceededException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LimitExceededException) GoString() string {
+	return s.String()
+}
+
+func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
+	return &LimitExceededException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *LimitExceededException) Code() string {
+	return "LimitExceededException"
+}
+
+// Message returns the exception's message.
+func (s *LimitExceededException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil; it satisfies the awserr.Error interface.
+func (s *LimitExceededException) OrigErr() error {
+	return nil
+}
+
+func (s *LimitExceededException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *LimitExceededException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *LimitExceededException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type ListBackupsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Only backups of the type specified by BackupType are listed.
+	//
+	// Where BackupType can be:
+	//
+	//    * USER - On-demand backup created by you. (The default setting if no other
+	//    backup types are specified.)
+	//
+	//    * SYSTEM - On-demand backup automatically created by DynamoDB.
+	//
+	//    * ALL - All types of on-demand backups (USER and SYSTEM).
+	BackupType *string `type:"string" enum:"BackupTypeFilter"`
+
+	// LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last
+	// evaluated when the current page of results was returned, inclusive of the
+	// current page of results. This value may be specified as the ExclusiveStartBackupArn
+	// of a new ListBackups operation in order to fetch the next page of results.
+	ExclusiveStartBackupArn *string `min:"37" type:"string"`
+
+	// Maximum number of backups to return at once.
+	Limit *int64 `min:"1" type:"integer"`
+
+	// The backups from the table specified by TableName are listed.
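+	//
+	// Illustrative sketch (not part of the generated code): listing up to ten
+	// backups of one table taken in the last 24 hours; the table name is
+	// hypothetical.
+	//
+	//	input := (&dynamodb.ListBackupsInput{}).
+	//		SetTableName("MyTable").
+	//		SetTimeRangeLowerBound(time.Now().Add(-24 * time.Hour)).
+	//		SetTimeRangeUpperBound(time.Now()).
+	//		SetLimit(10)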
+ TableName *string `min:"3" type:"string"` + + // Only backups created after this time are listed. TimeRangeLowerBound is inclusive. + TimeRangeLowerBound *time.Time `type:"timestamp"` + + // Only backups created before this time are listed. TimeRangeUpperBound is + // exclusive. + TimeRangeUpperBound *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBackupsInput"} + if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartBackupArn", 37)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupType sets the BackupType field's value. +func (s *ListBackupsInput) SetBackupType(v string) *ListBackupsInput { + s.BackupType = &v + return s +} + +// SetExclusiveStartBackupArn sets the ExclusiveStartBackupArn field's value. +func (s *ListBackupsInput) SetExclusiveStartBackupArn(v string) *ListBackupsInput { + s.ExclusiveStartBackupArn = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListBackupsInput) SetLimit(v int64) *ListBackupsInput { + s.Limit = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ListBackupsInput) SetTableName(v string) *ListBackupsInput { + s.TableName = &v + return s +} + +// SetTimeRangeLowerBound sets the TimeRangeLowerBound field's value. +func (s *ListBackupsInput) SetTimeRangeLowerBound(v time.Time) *ListBackupsInput { + s.TimeRangeLowerBound = &v + return s +} + +// SetTimeRangeUpperBound sets the TimeRangeUpperBound field's value. +func (s *ListBackupsInput) SetTimeRangeUpperBound(v time.Time) *ListBackupsInput { + s.TimeRangeUpperBound = &v + return s +} + +type ListBackupsOutput struct { + _ struct{} `type:"structure"` + + // List of BackupSummary objects. + BackupSummaries []*BackupSummary `type:"list"` + + // The ARN of the backup last evaluated when the current page of results was + // returned, inclusive of the current page of results. This value may be specified + // as the ExclusiveStartBackupArn of a new ListBackups operation in order to + // fetch the next page of results. + // + // If LastEvaluatedBackupArn is empty, then the last page of results has been + // processed and there are no more results to be retrieved. + // + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that + // there is more data to be returned. All results are guaranteed to have been + // returned if and only if no value for LastEvaluatedBackupArn is returned. 
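+	//
+	// Illustrative sketch (not part of the generated code): paging through all
+	// backups by feeding LastEvaluatedBackupArn back in as ExclusiveStartBackupArn;
+	// svc is a hypothetical *DynamoDB client and backups a caller-side accumulator.
+	//
+	//	input := &dynamodb.ListBackupsInput{}
+	//	for {
+	//		page, err := svc.ListBackups(input)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		backups = append(backups, page.BackupSummaries...)
+	//		if page.LastEvaluatedBackupArn == nil {
+	//			break // the last page has been processed
+	//		}
+	//		input.ExclusiveStartBackupArn = page.LastEvaluatedBackupArn
+	//	}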
+	LastEvaluatedBackupArn *string `min:"37" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBackupsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListBackupsOutput) GoString() string {
+	return s.String()
+}
+
+// SetBackupSummaries sets the BackupSummaries field's value.
+func (s *ListBackupsOutput) SetBackupSummaries(v []*BackupSummary) *ListBackupsOutput {
+	s.BackupSummaries = v
+	return s
+}
+
+// SetLastEvaluatedBackupArn sets the LastEvaluatedBackupArn field's value.
+func (s *ListBackupsOutput) SetLastEvaluatedBackupArn(v string) *ListBackupsOutput {
+	s.LastEvaluatedBackupArn = &v
+	return s
+}
+
+type ListContributorInsightsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Maximum number of results to return per page.
+	MaxResults *int64 `type:"integer"`
+
+	// A token for the desired page, if there is one.
+	NextToken *string `type:"string"`
+
+	// The name of the table.
+	TableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListContributorInsightsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListContributorInsightsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListContributorInsightsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListContributorInsightsInput"}
+	if s.TableName != nil && len(*s.TableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListContributorInsightsInput) SetMaxResults(v int64) *ListContributorInsightsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListContributorInsightsInput) SetNextToken(v string) *ListContributorInsightsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *ListContributorInsightsInput) SetTableName(v string) *ListContributorInsightsInput {
+	s.TableName = &v
+	return s
+}
+
+type ListContributorInsightsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of ContributorInsightsSummary.
+	ContributorInsightsSummaries []*ContributorInsightsSummary `type:"list"`
+
+	// A token to go to the next page, if there is one.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListContributorInsightsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListContributorInsightsOutput) GoString() string { + return s.String() +} + +// SetContributorInsightsSummaries sets the ContributorInsightsSummaries field's value. +func (s *ListContributorInsightsOutput) SetContributorInsightsSummaries(v []*ContributorInsightsSummary) *ListContributorInsightsOutput { + s.ContributorInsightsSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListContributorInsightsOutput) SetNextToken(v string) *ListContributorInsightsOutput { + s.NextToken = &v + return s +} + +type ListExportsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return per page. + MaxResults *int64 `min:"1" type:"integer"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListExports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `type:"string"` + + // The Amazon Resource Name (ARN) associated with the exported table. + TableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListExportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListExportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListExportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListExportsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListExportsInput) SetMaxResults(v int64) *ListExportsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListExportsInput) SetNextToken(v string) *ListExportsInput { + s.NextToken = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ListExportsInput) SetTableArn(v string) *ListExportsInput { + s.TableArn = &v + return s +} + +type ListExportsOutput struct { + _ struct{} `type:"structure"` + + // A list of ExportSummary objects. + ExportSummaries []*ExportSummary `type:"list"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListExports again, with NextToken set to this value. + NextToken *string `type:"string"` +} + +// String returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListExportsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListExportsOutput) GoString() string {
+	return s.String()
+}
+
+// SetExportSummaries sets the ExportSummaries field's value.
+func (s *ListExportsOutput) SetExportSummaries(v []*ExportSummary) *ListExportsOutput {
+	s.ExportSummaries = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListExportsOutput) SetNextToken(v string) *ListExportsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type ListGlobalTablesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The first global table name that this operation will evaluate.
+	ExclusiveStartGlobalTableName *string `min:"3" type:"string"`
+
+	// The maximum number of table names to return. If this parameter is not
+	// specified, DynamoDB defaults to 100.
+	//
+	// If the number of global tables DynamoDB finds reaches this limit, it stops
+	// the operation and returns the table names collected up to that point, with
+	// a table name in LastEvaluatedGlobalTableName that you can apply to the
+	// ExclusiveStartGlobalTableName parameter in a subsequent operation.
+	Limit *int64 `min:"1" type:"integer"`
+
+	// Lists the global tables in a specific Region.
+	RegionName *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListGlobalTablesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListGlobalTablesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListGlobalTablesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListGlobalTablesInput"}
+	if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3))
+	}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value.
+func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput {
+	s.ExclusiveStartGlobalTableName = &v
+	return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput {
+	s.Limit = &v
+	return s
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput { + s.RegionName = &v + return s +} + +type ListGlobalTablesOutput struct { + _ struct{} `type:"structure"` + + // List of global table names. + GlobalTables []*GlobalTable `type:"list"` + + // Last evaluated global table name. + LastEvaluatedGlobalTableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesOutput) GoString() string { + return s.String() +} + +// SetGlobalTables sets the GlobalTables field's value. +func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput { + s.GlobalTables = v + return s +} + +// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value. +func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput { + s.LastEvaluatedGlobalTableName = &v + return s +} + +type ListImportsInput struct { + _ struct{} `type:"structure"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListImports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `min:"112" type:"string"` + + // The number of ImportSummary objects returned in a single page. + PageSize *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) associated with the table that was imported + // to. + TableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListImportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListImportsInput"} + if s.NextToken != nil && len(*s.NextToken) < 112 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 112)) + } + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImportsInput) SetNextToken(v string) *ListImportsInput { + s.NextToken = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *ListImportsInput) SetPageSize(v int64) *ListImportsInput { + s.PageSize = &v + return s +} + +// SetTableArn sets the TableArn field's value. 
+func (s *ListImportsInput) SetTableArn(v string) *ListImportsInput { + s.TableArn = &v + return s +} + +type ListImportsOutput struct { + _ struct{} `type:"structure"` + + // A list of ImportSummary objects. + ImportSummaryList []*ImportSummary `type:"list"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListImports again, with NextToken set to this value. + NextToken *string `min:"112" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsOutput) GoString() string { + return s.String() +} + +// SetImportSummaryList sets the ImportSummaryList field's value. +func (s *ListImportsOutput) SetImportSummaryList(v []*ImportSummary) *ListImportsOutput { + s.ImportSummaryList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput { + s.NextToken = &v + return s +} + +// Represents the input of a ListTables operation. +type ListTablesInput struct { + _ struct{} `type:"structure"` + + // The first table name that this operation will evaluate. Use the value that + // was returned for LastEvaluatedTableName in a previous operation, so that + // you can obtain the next page of results. + ExclusiveStartTableName *string `min:"3" type:"string"` + + // A maximum number of table names to return. If this parameter is not specified, + // the limit is 100. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"} + if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExclusiveStartTableName sets the ExclusiveStartTableName field's value. +func (s *ListTablesInput) SetExclusiveStartTableName(v string) *ListTablesInput { + s.ExclusiveStartTableName = &v + return s +} + +// SetLimit sets the Limit field's value. 
+func (s *ListTablesInput) SetLimit(v int64) *ListTablesInput {
+	s.Limit = &v
+	return s
+}
+
+// Represents the output of a ListTables operation.
+type ListTablesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the last table in the current page of results. Use this value
+	// as the ExclusiveStartTableName in a new request to obtain the next page of
+	// results, until all the table names are returned.
+	//
+	// If you do not receive a LastEvaluatedTableName value in the response, this
+	// means that there are no more table names to be retrieved.
+	LastEvaluatedTableName *string `min:"3" type:"string"`
+
+	// The names of the tables associated with the current account at the current
+	// endpoint. The maximum size of this array is 100.
+	//
+	// If LastEvaluatedTableName also appears in the output, you can use this value
+	// as the ExclusiveStartTableName parameter in a subsequent ListTables request
+	// and obtain the next page of results.
+	TableNames []*string `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTablesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTablesOutput) GoString() string {
+	return s.String()
+}
+
+// SetLastEvaluatedTableName sets the LastEvaluatedTableName field's value.
+func (s *ListTablesOutput) SetLastEvaluatedTableName(v string) *ListTablesOutput {
+	s.LastEvaluatedTableName = &v
+	return s
+}
+
+// SetTableNames sets the TableNames field's value.
+func (s *ListTablesOutput) SetTableNames(v []*string) *ListTablesOutput {
+	s.TableNames = v
+	return s
+}
+
+type ListTagsOfResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// An optional string that, if supplied, must be copied from the output of a
+	// previous call to ListTagsOfResource. When provided in this manner, this API
+	// fetches the next page of results.
+	NextToken *string `type:"string"`
+
+	// The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
+	// Resource Name (ARN).
+	//
+	// ResourceArn is a required field
+	ResourceArn *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTagsOfResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTagsOfResourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsOfResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsOfResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsOfResourceInput) SetNextToken(v string) *ListTagsOfResourceInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsOfResourceInput) SetResourceArn(v string) *ListTagsOfResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsOfResourceOutput struct { + _ struct{} `type:"structure"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListTagsOfResource again, with NextToken set to this + // value. + NextToken *string `type:"string"` + + // The tags currently associated with the Amazon DynamoDB resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsOfResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListTagsOfResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsOfResourceOutput) SetNextToken(v string) *ListTagsOfResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsOfResourceOutput) SetTags(v []*Tag) *ListTagsOfResourceOutput { + s.Tags = v + return s +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the local secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into the + // local secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. 
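+	//
+	// Illustrative sketch (not part of the generated code): defining a local
+	// secondary index that reuses the table's partition key and projects one
+	// extra non-key attribute; all names are hypothetical.
+	//
+	//	lsi := (&dynamodb.LocalSecondaryIndex{}).
+	//		SetIndexName("ByOrderDate").
+	//		SetKeySchema([]*dynamodb.KeySchemaElement{
+	//			(&dynamodb.KeySchemaElement{}).SetAttributeName("CustomerId").SetKeyType(dynamodb.KeyTypeHash),
+	//			(&dynamodb.KeySchemaElement{}).SetAttributeName("OrderDate").SetKeyType(dynamodb.KeyTypeRange),
+	//		}).
+	//		SetProjection((&dynamodb.Projection{}).
+	//			SetProjectionType(dynamodb.ProjectionTypeInclude).
+	//			SetNonKeyAttributes([]*string{aws.String("Total")}))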
+ // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LocalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *LocalSecondaryIndex) SetIndexName(v string) *LocalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *LocalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *LocalSecondaryIndex) SetProjection(v *Projection) *LocalSecondaryIndex { + s.Projection = v + return s +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // Represents the name of the local secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The number of items in the specified index. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The complete key schema for the local secondary index, consisting of one + // or more pairs of attribute names and key types: + // + // * HASH - partition key + // + // * RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function
+	// to evenly distribute data items across partitions, based on their partition
+	// key values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+	// Represents attributes that are copied (projected) from the table into the
+	// local secondary index. These are in addition to the primary key attributes
+	// and index key attributes, which are automatically projected.
+	Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LocalSecondaryIndexDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LocalSecondaryIndexDescription) GoString() string {
+	return s.String()
+}
+
+// SetIndexArn sets the IndexArn field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexArn(v string) *LocalSecondaryIndexDescription {
+	s.IndexArn = &v
+	return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexName(v string) *LocalSecondaryIndexDescription {
+	s.IndexName = &v
+	return s
+}
+
+// SetIndexSizeBytes sets the IndexSizeBytes field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *LocalSecondaryIndexDescription {
+	s.IndexSizeBytes = &v
+	return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *LocalSecondaryIndexDescription) SetItemCount(v int64) *LocalSecondaryIndexDescription {
+	s.ItemCount = &v
+	return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *LocalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexDescription {
+	s.KeySchema = v
+	return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndexDescription) SetProjection(v *Projection) *LocalSecondaryIndexDescription {
+	s.Projection = v
+	return s
+}
+
+// Represents the properties of a local secondary index for the table when the
+// backup was created.
+type LocalSecondaryIndexInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the name of the local secondary index.
+	IndexName *string `min:"3" type:"string"`
+
+	// The complete key schema for a local secondary index, which consists of one
+	// or more pairs of attribute names and key types:
+	//
+	//    * HASH - partition key
+	//
+	//    * RANGE - sort key
+	//
+	// The partition key of an item is also known as its hash attribute. The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function
+	// to evenly distribute data items across partitions, based on their partition
+	// key values.
+	//
+	// The sort key of an item is also known as its range attribute.
The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. + Projection *Projection `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndexInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocalSecondaryIndexInfo) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *LocalSecondaryIndexInfo) SetIndexName(v string) *LocalSecondaryIndexInfo { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *LocalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexInfo { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIndexInfo { + s.Projection = v + return s +} + +// Represents a PartiQL statment that uses parameters. +type ParameterizedStatement struct { + _ struct{} `type:"structure"` + + // The parameter values. + Parameters []*AttributeValue `min:"1" type:"list"` + + // A PartiQL statment that uses parameters. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParameterizedStatement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParameterizedStatement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParameterizedStatement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterizedStatement"} + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParameters sets the Parameters field's value. 
+func (s *ParameterizedStatement) SetParameters(v []*AttributeValue) *ParameterizedStatement { + s.Parameters = v + return s +} + +// SetStatement sets the Statement field's value. +func (s *ParameterizedStatement) SetStatement(v string) *ParameterizedStatement { + s.Statement = &v + return s +} + +// The description of the point in time settings applied to the table. +type PointInTimeRecoveryDescription struct { + _ struct{} `type:"structure"` + + // Specifies the earliest point in time you can restore your table to. You can + // restore your table to any point in time during the last 35 days. + EarliestRestorableDateTime *time.Time `type:"timestamp"` + + // LatestRestorableDateTime is typically 5 minutes before the current time. + LatestRestorableDateTime *time.Time `type:"timestamp"` + + // The current state of point in time recovery: + // + // * ENABLED - Point in time recovery is enabled. + // + // * DISABLED - Point in time recovery is disabled. + PointInTimeRecoveryStatus *string `type:"string" enum:"PointInTimeRecoveryStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoveryDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoveryDescription) GoString() string { + return s.String() +} + +// SetEarliestRestorableDateTime sets the EarliestRestorableDateTime field's value. +func (s *PointInTimeRecoveryDescription) SetEarliestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription { + s.EarliestRestorableDateTime = &v + return s +} + +// SetLatestRestorableDateTime sets the LatestRestorableDateTime field's value. +func (s *PointInTimeRecoveryDescription) SetLatestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription { + s.LatestRestorableDateTime = &v + return s +} + +// SetPointInTimeRecoveryStatus sets the PointInTimeRecoveryStatus field's value. +func (s *PointInTimeRecoveryDescription) SetPointInTimeRecoveryStatus(v string) *PointInTimeRecoveryDescription { + s.PointInTimeRecoveryStatus = &v + return s +} + +// Represents the settings used to enable point in time recovery. +type PointInTimeRecoverySpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether point in time recovery is enabled (true) or disabled (false) + // on the table. + // + // PointInTimeRecoveryEnabled is a required field + PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PointInTimeRecoverySpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PointInTimeRecoverySpecification) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PointInTimeRecoverySpecification) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"}
+	if s.PointInTimeRecoveryEnabled == nil {
+		invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoveryEnabled"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetPointInTimeRecoveryEnabled sets the PointInTimeRecoveryEnabled field's value.
+func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) *PointInTimeRecoverySpecification {
+	s.PointInTimeRecoveryEnabled = &v
+	return s
+}
+
+// Point in time recovery has not yet been enabled for this source table.
+type PointInTimeRecoveryUnavailableException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PointInTimeRecoveryUnavailableException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PointInTimeRecoveryUnavailableException) GoString() string {
+	return s.String()
+}
+
+func newErrorPointInTimeRecoveryUnavailableException(v protocol.ResponseMetadata) error {
+	return &PointInTimeRecoveryUnavailableException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *PointInTimeRecoveryUnavailableException) Code() string {
+	return "PointInTimeRecoveryUnavailableException"
+}
+
+// Message returns the exception's message.
+func (s *PointInTimeRecoveryUnavailableException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil; it satisfies the awserr.Error interface.
+func (s *PointInTimeRecoveryUnavailableException) OrigErr() error {
+	return nil
+}
+
+func (s *PointInTimeRecoveryUnavailableException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *PointInTimeRecoveryUnavailableException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *PointInTimeRecoveryUnavailableException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Represents attributes that are copied (projected) from the table into an
+// index. These are in addition to the primary key attributes and index key
+// attributes, which are automatically projected.
+type Projection struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the non-key attribute names which will be projected into the index.
+	//
+	// For local secondary indexes, the total count of NonKeyAttributes summed across
+	// all of the local secondary indexes must not exceed 100.
If you project the + // same attribute into two different indexes, this counts as two distinct attributes + // when determining the total. + NonKeyAttributes []*string `min:"1" type:"list"` + + // The set of attributes that are projected into the index: + // + // * KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // * INCLUDE - In addition to the attributes described in KEYS_ONLY, the + // secondary index will include other non-key attributes that you specify. + // + // * ALL - All of the table attributes are projected into the index. + ProjectionType *string `type:"string" enum:"ProjectionType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Projection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Projection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Projection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Projection"} + if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNonKeyAttributes sets the NonKeyAttributes field's value. +func (s *Projection) SetNonKeyAttributes(v []*string) *Projection { + s.NonKeyAttributes = v + return s +} + +// SetProjectionType sets the ProjectionType field's value. +func (s *Projection) SetProjectionType(v string) *Projection { + s.ProjectionType = &v + return s +} + +// Represents the provisioned throughput settings for a specified table or index. +// The settings can be modified using the UpdateTable operation. +// +// For current minimum and maximum provisioned throughput values, see Service, +// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // ReadCapacityUnits is a required field + ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. 
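+ //
+ // Illustrative sketch (not generated documentation): a small provisioned
+ // table might set five units of each, chaining the setters defined below:
+ //
+ //    pt := (&dynamodb.ProvisionedThroughput{}).
+ //        SetReadCapacityUnits(5).
+ //        SetWriteCapacityUnits(5)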
+ // + // WriteCapacityUnits is a required field + WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionedThroughput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"} + if s.ReadCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits")) + } + if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) + } + if s.WriteCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits")) + } + if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughput) SetReadCapacityUnits(v int64) *ProvisionedThroughput { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ProvisionedThroughput) SetWriteCapacityUnits(v int64) *ProvisionedThroughput { + s.WriteCapacityUnits = &v + return s +} + +// Represents the provisioned throughput settings for the table, consisting +// of read and write capacity units, along with data about increases and decreases. +type ProvisionedThroughputDescription struct { + _ struct{} `type:"structure"` + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time `type:"timestamp"` + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time `type:"timestamp"` + + // The number of provisioned throughput decreases for this table during this + // UTC calendar day. For current maximums on provisioned throughput decreases, + // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + NumberOfDecreasesToday *int64 `min:"1" type:"long"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 `type:"long"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + WriteCapacityUnits *int64 `type:"long"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputDescription) GoString() string { + return s.String() +} + +// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value. +func (s *ProvisionedThroughputDescription) SetLastDecreaseDateTime(v time.Time) *ProvisionedThroughputDescription { + s.LastDecreaseDateTime = &v + return s +} + +// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value. +func (s *ProvisionedThroughputDescription) SetLastIncreaseDateTime(v time.Time) *ProvisionedThroughputDescription { + s.LastIncreaseDateTime = &v + return s +} + +// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value. +func (s *ProvisionedThroughputDescription) SetNumberOfDecreasesToday(v int64) *ProvisionedThroughputDescription { + s.NumberOfDecreasesToday = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughputDescription) SetReadCapacityUnits(v int64) *ProvisionedThroughputDescription { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *ProvisionedThroughputDescription { + s.WriteCapacityUnits = &v + return s +} + +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughputExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // You exceeded your maximum allowed provisioned throughput. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputExceededException) GoString() string { + return s.String() +} + +func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { + return &ProvisionedThroughputExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
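+//
+// Illustrative sketch (not generated documentation): callers commonly match on
+// this code via the awserr.Error interface to decide whether to back off:
+//
+//    if aerr, ok := err.(awserr.Error); ok &&
+//        aerr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException {
+//        // retry with exponential backoff
+//    }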
+func (s *ProvisionedThroughputExceededException) Code() string { + return "ProvisionedThroughputExceededException" +} + +// Message returns the exception's message. +func (s *ProvisionedThroughputExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ProvisionedThroughputExceededException) OrigErr() error { + return nil +} + +func (s *ProvisionedThroughputExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ProvisionedThroughputExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ProvisionedThroughputExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Replica-specific provisioned throughput settings. If not specified, uses +// the source table's provisioned throughput settings. +type ProvisionedThroughputOverride struct { + _ struct{} `type:"structure"` + + // Replica-specific read capacity units. If not specified, uses the source table's + // read capacity settings. + ReadCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvisionedThroughputOverride) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionedThroughputOverride) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughputOverride"} + if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughputOverride) SetReadCapacityUnits(v int64) *ProvisionedThroughputOverride { + s.ReadCapacityUnits = &v + return s +} + +// Represents a request to perform a PutItem operation. +type Put struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to be written by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. 
If any attributes are present in the item that are part of an index + // key schema for the table, their types must match the index key schema. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Put condition fails. For ReturnValuesOnConditionCheckFailure, the valid values + // are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table in which to write the item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Put) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Put) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Put) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Put"} + if s.Item == nil { + invalidParams.Add(request.NewErrParamRequired("Item")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Put) SetConditionExpression(v string) *Put { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Put) SetExpressionAttributeNames(v map[string]*string) *Put { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Put) SetExpressionAttributeValues(v map[string]*AttributeValue) *Put { + s.ExpressionAttributeValues = v + return s +} + +// SetItem sets the Item field's value. +func (s *Put) SetItem(v map[string]*AttributeValue) *Put { + s.Item = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Put) SetReturnValuesOnConditionCheckFailure(v string) *Put { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Put) SetTableName(v string) *Put { + s.TableName = &v + return s +} + +// Represents the input of a PutItem operation. +type PutItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional PutItem operation + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. 
+ // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A map of attribute name/value pairs, one for each attribute. Only the primary
+ // key attributes are required; you can optionally provide other attribute name-value
+ // pairs for the item.
+ //
+ // You must provide all of the attributes for the primary key. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the
+ // partition key and the sort key.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // Empty String and Binary attribute values are allowed. Attribute values of
+ // type String and Binary must have a length greater than zero if the attribute
+ // is used as a key attribute for a table or index.
+ //
+ // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each element in the Item map is an AttributeValue object.
+ //
+ // Item is a required field
+ Item map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // statistics about item collections, if any, that were modified during the
+ // operation are returned in the response. If set to NONE (the default), no
+ // statistics are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were updated with the PutItem request. For PutItem, the valid
+ // values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned.
(This setting is the default for ReturnValues.) + // + // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the + // content of the old item is returned. + // + // The values returned are strongly consistent. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // PutItem does not recognize any values other than NONE or ALL_OLD. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table to contain the item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutItemInput"} + if s.Item == nil { + invalidParams.Add(request.NewErrParamRequired("Item")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *PutItemInput) SetConditionExpression(v string) *PutItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *PutItemInput) SetConditionalOperator(v string) *PutItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *PutItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *PutItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *PutItemInput) SetExpressionAttributeNames(v map[string]*string) *PutItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *PutItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *PutItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetItem sets the Item field's value. +func (s *PutItemInput) SetItem(v map[string]*AttributeValue) *PutItemInput { + s.Item = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *PutItemInput) SetReturnConsumedCapacity(v string) *PutItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. 
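+//
+// Illustrative sketch (not generated documentation; the table and attribute
+// names are hypothetical): the setters return the receiver, so a PutItemInput
+// can be assembled fluently:
+//
+//    input := (&dynamodb.PutItemInput{}).
+//        SetTableName("Music").
+//        SetItem(map[string]*dynamodb.AttributeValue{
+//            "Artist":    {S: aws.String("No One You Know")},
+//            "SongTitle": {S: aws.String("Call Me Today")},
+//        }).
+//        SetReturnItemCollectionMetrics(dynamodb.ReturnItemCollectionMetricsSize)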
+func (s *PutItemInput) SetReturnItemCollectionMetrics(v string) *PutItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *PutItemInput) SetReturnValues(v string) *PutItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *PutItemInput) SetTableName(v string) *PutItemInput { + s.TableName = &v + return s +} + +// Represents the output of a PutItem operation. +type PutItemOutput struct { + _ struct{} `type:"structure"` + + // The attribute values as they appeared before the PutItem operation, but only + // if ReturnValues is specified as ALL_OLD in the request. Each element consists + // of an attribute name and an attribute value. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the PutItem operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the PutItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *PutItemOutput) SetAttributes(v map[string]*AttributeValue) *PutItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
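+//
+// Illustrative sketch (not generated documentation): ConsumedCapacity is only
+// populated when ReturnConsumedCapacity was set on the request, so callers
+// usually guard the read:
+//
+//    if out.ConsumedCapacity != nil && out.ConsumedCapacity.CapacityUnits != nil {
+//        log.Printf("consumed %.1f capacity units", *out.ConsumedCapacity.CapacityUnits)
+//    }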
+func (s *PutItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *PutItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *PutItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *PutItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a request to perform a PutItem operation on an item. +type PutRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of an item to be processed by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item that are part of an index + // key schema for the table, their types must match the index key schema. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutRequest) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. +func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest { + s.Item = v + return s +} + +// Represents the input of a Query operation. +type QueryInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you query a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number, or Binary. No + // set data types are allowed. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. 
The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Query operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression does not allow key attributes. You cannot define a filter + // expression based on a partition key or a sort key. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // in the Amazon DynamoDB Developer Guide. + FilterExpression *string `type:"string"` + + // The name of an index to query. This index can be any local secondary index + // or global secondary index on the table. Note that if you use the IndexName + // parameter, you must also provide TableName. 
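+ //
+ // Illustrative sketch (not generated documentation; the index name is
+ // hypothetical): querying a global secondary index only requires naming it:
+ //
+ //    input.SetIndexName("AlbumTitle-index")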
+ IndexName *string `min:"3" type:"string"`
+
+ // The condition that specifies the key values for items to be retrieved by
+ // the Query action.
+ //
+ // The condition must perform an equality test on a single partition key value.
+ //
+ // The condition can optionally perform one of several comparison tests on a
+ // single sort key value. This allows Query to retrieve one item with a given
+ // partition key value and sort key value, or several items that have the same
+ // partition key value but different sort key values.
+ //
+ // The partition key equality test is required, and must be specified in the
+ // following format:
+ //
+ // partitionKeyName = :partitionkeyval
+ //
+ // If you also want to provide a condition for the sort key, it must be combined
+ // using AND with the condition for the partition key. Following is an example,
+ // using the = comparison operator for the sort key:
+ //
+ // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
+ //
+ // Valid comparisons for the sort key condition are as follows:
+ //
+ // * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
+ //
+ // * sortKeyName < :sortkeyval - true if the sort key value is less than
+ // :sortkeyval.
+ //
+ // * sortKeyName <= :sortkeyval - true if the sort key value is less than
+ // or equal to :sortkeyval.
+ //
+ // * sortKeyName > :sortkeyval - true if the sort key value is greater than
+ // :sortkeyval.
+ //
+ // * sortKeyName >= :sortkeyval - true if the sort key value is greater than
+ // or equal to :sortkeyval.
+ //
+ // * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort
+ // key value is greater than or equal to :sortkeyval1, and less than or equal
+ // to :sortkeyval2.
+ //
+ // * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value
+ // begins with a particular operand. (You cannot use this function with a
+ // sort key that is of type Number.) Note that the function name begins_with
+ // is case-sensitive.
+ //
+ // Use the ExpressionAttributeValues parameter to replace tokens such as :partitionkeyval
+ // and :sortkeyval with actual values at runtime.
+ //
+ // You can optionally use the ExpressionAttributeNames parameter to replace
+ // the names of the partition key and sort key with placeholder tokens. This
+ // option might be necessary if an attribute name conflicts with a DynamoDB
+ // reserved word. For example, the following KeyConditionExpression parameter
+ // causes an error because Size is a reserved word:
+ //
+ // * Size = :myval
+ //
+ // To work around this, define a placeholder (such as #S) to represent the attribute
+ // name Size. KeyConditionExpression then is as follows:
+ //
+ // * #S = :myval
+ //
+ // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // For more information on ExpressionAttributeNames and ExpressionAttributeValues,
+ // see Using Placeholders for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
+ // in the Amazon DynamoDB Developer Guide.
+ KeyConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use KeyConditionExpression instead. For more
+ // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
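+ //
+ // Illustrative sketch (not generated documentation; attribute names are
+ // hypothetical): the KeyConditionExpression equivalent of a typical
+ // KeyConditions entry looks like:
+ //
+ //    input.SetKeyConditionExpression("Artist = :a AND begins_with(SongTitle, :t)")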
+ KeyConditions map[string]*Condition `type:"map"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html) + // in the Amazon DynamoDB Developer Guide. + QueryFilter map[string]*Condition `type:"map"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Specifies the order for index traversal: If true (default), the traversal + // is performed in ascending order; if false, the traversal is performed in + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort + // key. If the sort key data type is Number, the results are stored in numeric + // order. For type String, the results are stored in order of UTF-8 bytes. For + // type Binary, DynamoDB treats each byte of the binary data as unsigned. + // + // If ScanIndexForward is true, DynamoDB returns the results in the order in + // which they are stored (by sort key value). This is the default behavior. + // If ScanIndexForward is false, DynamoDB reads the results in reverse order + // by sort key value, and then returns the results to the client. 
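+ //
+ // Illustrative sketch (not generated documentation): to page through items
+ // newest-first when the sort key is a timestamp, disable forward traversal:
+ //
+ //    input.SetScanIndexForward(false)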
+ ScanIndexForward *bool `type:"boolean"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. + // + // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent + // to specifying ALL_ATTRIBUTES. + // + // * COUNT - Returns the number of matching items, rather than the matching + // items themselves. Note that this uses the same quantity of read capacity + // units as getting the items, and is subject to the same item size calculations. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. + // This return value is equivalent to specifying ProjectionExpression without + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation will read only the index and not the table. If any of the + // requested attributes are not projected into the local secondary index, + // DynamoDB fetches each of these attributes from the parent table. This + // extra fetching incurs additional throughput cost and latency. If you query + // or scan a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying ProjectionExpression without any + // value for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s QueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.KeyConditions != nil { + for i, v := range s.KeyConditions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueryFilter != nil { + for i, v := range s.QueryFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *QueryInput) SetAttributesToGet(v []*string) *QueryInput { + s.AttributesToGet = v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *QueryInput) SetConditionalOperator(v string) *QueryInput { + s.ConditionalOperator = &v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *QueryInput) SetConsistentRead(v bool) *QueryInput { + s.ConsistentRead = &v + return s +} + +// SetExclusiveStartKey sets the ExclusiveStartKey field's value. +func (s *QueryInput) SetExclusiveStartKey(v map[string]*AttributeValue) *QueryInput { + s.ExclusiveStartKey = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *QueryInput) SetExpressionAttributeNames(v map[string]*string) *QueryInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *QueryInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *QueryInput { + s.ExpressionAttributeValues = v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *QueryInput) SetFilterExpression(v string) *QueryInput { + s.FilterExpression = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *QueryInput) SetIndexName(v string) *QueryInput { + s.IndexName = &v + return s +} + +// SetKeyConditionExpression sets the KeyConditionExpression field's value. +func (s *QueryInput) SetKeyConditionExpression(v string) *QueryInput { + s.KeyConditionExpression = &v + return s +} + +// SetKeyConditions sets the KeyConditions field's value. +func (s *QueryInput) SetKeyConditions(v map[string]*Condition) *QueryInput { + s.KeyConditions = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *QueryInput) SetLimit(v int64) *QueryInput { + s.Limit = &v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. 
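+//
+// Illustrative sketch (not generated documentation; attribute names are
+// hypothetical): a projection expression is a comma-separated attribute list,
+// with #-placeholders for reserved words such as Year:
+//
+//    input.SetProjectionExpression("Artist, SongTitle, #y").
+//        SetExpressionAttributeNames(map[string]*string{"#y": aws.String("Year")})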
+func (s *QueryInput) SetProjectionExpression(v string) *QueryInput { + s.ProjectionExpression = &v + return s +} + +// SetQueryFilter sets the QueryFilter field's value. +func (s *QueryInput) SetQueryFilter(v map[string]*Condition) *QueryInput { + s.QueryFilter = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *QueryInput) SetReturnConsumedCapacity(v string) *QueryInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetScanIndexForward sets the ScanIndexForward field's value. +func (s *QueryInput) SetScanIndexForward(v bool) *QueryInput { + s.ScanIndexForward = &v + return s +} + +// SetSelect sets the Select field's value. +func (s *QueryInput) SetSelect(v string) *QueryInput { + s.Select = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *QueryInput) SetTableName(v string) *QueryInput { + s.TableName = &v + return s +} + +// Represents the output of a Query operation. +type QueryOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the Query operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount are + // the same. + Count *int64 `type:"integer"` + + // An array of item attributes that match the query criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any QueryFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient + // Query operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueryOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *QueryOutput) SetConsumedCapacity(v *ConsumedCapacity) *QueryOutput { + s.ConsumedCapacity = v + return s +} + +// SetCount sets the Count field's value. +func (s *QueryOutput) SetCount(v int64) *QueryOutput { + s.Count = &v + return s +} + +// SetItems sets the Items field's value. +func (s *QueryOutput) SetItems(v []map[string]*AttributeValue) *QueryOutput { + s.Items = v + return s +} + +// SetLastEvaluatedKey sets the LastEvaluatedKey field's value. +func (s *QueryOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *QueryOutput { + s.LastEvaluatedKey = v + return s +} + +// SetScannedCount sets the ScannedCount field's value. +func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput { + s.ScannedCount = &v + return s +} + +// Represents the properties of a replica. +type Replica struct { + _ struct{} `type:"structure"` + + // The Region where the replica needs to be created. + RegionName *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Replica) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Replica) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *Replica) SetRegionName(v string) *Replica { + s.RegionName = &v + return s +} + +// The specified replica is already part of the global table. +type ReplicaAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorReplicaAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ReplicaAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *ReplicaAlreadyExistsException) Code() string {
+	return "ReplicaAlreadyExistsException"
+}
+
+// Message returns the exception's message.
+func (s *ReplicaAlreadyExistsException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ReplicaAlreadyExistsException) OrigErr() error {
+	return nil
+}
+
+func (s *ReplicaAlreadyExistsException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ReplicaAlreadyExistsException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ReplicaAlreadyExistsException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Represents the auto scaling settings of the replica.
+type ReplicaAutoScalingDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Replica-specific global secondary index auto scaling settings.
+	GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexAutoScalingDescription `type:"list"`
+
+	// The Region where the replica exists.
+	RegionName *string `type:"string"`
+
+	// Represents the auto scaling settings for a global table or global secondary
+	// index.
+	ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+	// Represents the auto scaling settings for a global table or global secondary
+	// index.
+	ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+	// The current state of the replica:
+	//
+	// * CREATING - The replica is being created.
+	//
+	// * UPDATING - The replica is being updated.
+	//
+	// * DELETING - The replica is being deleted.
+	//
+	// * ACTIVE - The replica is ready for use.
+	ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaAutoScalingDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaAutoScalingDescription) GoString() string {
+	return s.String()
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
+func (s *ReplicaAutoScalingDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexAutoScalingDescription) *ReplicaAutoScalingDescription {
+	s.GlobalSecondaryIndexes = v
+	return s
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ReplicaAutoScalingDescription) SetRegionName(v string) *ReplicaAutoScalingDescription {
+	s.RegionName = &v
+	return s
+}
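+
+// NOTE (editor's illustrative sketch, not generated code): the QueryOutput
+// documentation earlier in this file says the only reliable end-of-results
+// signal is an empty LastEvaluatedKey. A minimal pagination loop over Query,
+// assuming an existing *DynamoDB client, could therefore look like this:
+func queryAllItems(svc *DynamoDB, input *QueryInput) ([]map[string]*AttributeValue, error) {
+	var items []map[string]*AttributeValue
+	for {
+		out, err := svc.Query(input)
+		if err != nil {
+			return nil, err
+		}
+		items = append(items, out.Items...)
+		// A non-empty LastEvaluatedKey must be fed back as ExclusiveStartKey
+		// to fetch the next page; an empty one means the last page was read.
+		if len(out.LastEvaluatedKey) == 0 {
+			return items, nil
+		}
+		input.ExclusiveStartKey = out.LastEvaluatedKey
+	}
+}
+
+// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value.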
+func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription { + s.ReplicaProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription { + s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaAutoScalingDescription) SetReplicaStatus(v string) *ReplicaAutoScalingDescription { + s.ReplicaStatus = &v + return s +} + +// Represents the auto scaling settings of a replica that will be modified. +type ReplicaAutoScalingUpdate struct { + _ struct{} `type:"structure"` + + // The Region where the replica exists. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Represents the auto scaling settings of global secondary indexes that will + // be modified. + ReplicaGlobalSecondaryIndexUpdates []*ReplicaGlobalSecondaryIndexAutoScalingUpdate `type:"list"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ReplicaProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAutoScalingUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaAutoScalingUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaAutoScalingUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaAutoScalingUpdate"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.ReplicaGlobalSecondaryIndexUpdates != nil { + for i, v := range s.ReplicaGlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil { + if err := s.ReplicaProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaAutoScalingUpdate) SetRegionName(v string) *ReplicaAutoScalingUpdate { + s.RegionName = &v + return s +} + +// SetReplicaGlobalSecondaryIndexUpdates sets the ReplicaGlobalSecondaryIndexUpdates field's value. 
+func (s *ReplicaAutoScalingUpdate) SetReplicaGlobalSecondaryIndexUpdates(v []*ReplicaGlobalSecondaryIndexAutoScalingUpdate) *ReplicaAutoScalingUpdate {
+	s.ReplicaGlobalSecondaryIndexUpdates = v
+	return s
+}
+
+// SetReplicaProvisionedReadCapacityAutoScalingUpdate sets the ReplicaProvisionedReadCapacityAutoScalingUpdate field's value.
+func (s *ReplicaAutoScalingUpdate) SetReplicaProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaAutoScalingUpdate {
+	s.ReplicaProvisionedReadCapacityAutoScalingUpdate = v
+	return s
+}
+
+// Contains the details of the replica.
+type ReplicaDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Replica-specific global secondary index settings.
+	GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexDescription `type:"list"`
+
+	// The KMS key of the replica that will be used for KMS encryption.
+	KMSMasterKeyId *string `type:"string"`
+
+	// Replica-specific provisioned throughput. If not described, uses the source
+	// table's provisioned throughput settings.
+	ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
+
+	// The name of the Region.
+	RegionName *string `type:"string"`
+
+	// The time at which the replica was first detected as inaccessible. To determine
+	// the cause of inaccessibility, check the ReplicaStatus property.
+	ReplicaInaccessibleDateTime *time.Time `type:"timestamp"`
+
+	// The current state of the replica:
+	//
+	// * CREATING - The replica is being created.
+	//
+	// * UPDATING - The replica is being updated.
+	//
+	// * DELETING - The replica is being deleted.
+	//
+	// * ACTIVE - The replica is ready for use.
+	//
+	// * REGION_DISABLED - The replica is inaccessible because the Amazon Web
+	// Services Region has been disabled. If the Amazon Web Services Region remains
+	// inaccessible for more than 20 hours, DynamoDB will remove this replica
+	// from the replication group. The replica will not be deleted and replication
+	// will stop from and to this region.
+	//
+	// * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the
+	// table is inaccessible. If the KMS key remains inaccessible for more than
+	// 20 hours, DynamoDB will remove this replica from the replication group.
+	// The replica will not be deleted and replication will stop from and to
+	// this region.
+	ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
+
+	// Detailed information about the replica status.
+	ReplicaStatusDescription *string `type:"string"`
+
+	// Specifies the progress of a Create, Update, or Delete action on the replica
+	// as a percentage.
+	ReplicaStatusPercentProgress *string `type:"string"`
+
+	// Contains details of the table class.
+	ReplicaTableClassSummary *TableClassSummary `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaDescription) GoString() string {
+	return s.String()
+}
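+
+// NOTE (editor's illustrative sketch, not generated code): of the ReplicaStatus
+// states documented above, only ACTIVE means the replica is usable; the other
+// values are transitional or mark the replica as inaccessible. Assuming the
+// generated ReplicaStatusActive constant for the ACTIVE enum value:
+func replicaUsable(d *ReplicaDescription) bool {
+	// ReplicaStatus is a pointer, so guard against a missing value before
+	// comparing it with the enum constant.
+	return d.ReplicaStatus != nil && *d.ReplicaStatus == ReplicaStatusActive
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.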
+func (s *ReplicaDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexDescription) *ReplicaDescription { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *ReplicaDescription) SetKMSMasterKeyId(v string) *ReplicaDescription { + s.KMSMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *ReplicaDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaDescription { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription { + s.RegionName = &v + return s +} + +// SetReplicaInaccessibleDateTime sets the ReplicaInaccessibleDateTime field's value. +func (s *ReplicaDescription) SetReplicaInaccessibleDateTime(v time.Time) *ReplicaDescription { + s.ReplicaInaccessibleDateTime = &v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaDescription) SetReplicaStatus(v string) *ReplicaDescription { + s.ReplicaStatus = &v + return s +} + +// SetReplicaStatusDescription sets the ReplicaStatusDescription field's value. +func (s *ReplicaDescription) SetReplicaStatusDescription(v string) *ReplicaDescription { + s.ReplicaStatusDescription = &v + return s +} + +// SetReplicaStatusPercentProgress sets the ReplicaStatusPercentProgress field's value. +func (s *ReplicaDescription) SetReplicaStatusPercentProgress(v string) *ReplicaDescription { + s.ReplicaStatusPercentProgress = &v + return s +} + +// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value. +func (s *ReplicaDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaDescription { + s.ReplicaTableClassSummary = v + return s +} + +// Represents the properties of a replica global secondary index. +type ReplicaGlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Replica table GSI-specific provisioned throughput. If not specified, uses + // the source table GSI's read capacity settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicaGlobalSecondaryIndex) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndex"}
+	if s.IndexName == nil {
+		invalidParams.Add(request.NewErrParamRequired("IndexName"))
+	}
+	if s.IndexName != nil && len(*s.IndexName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+	}
+	if s.ProvisionedThroughputOverride != nil {
+		if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
+			invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *ReplicaGlobalSecondaryIndex) SetIndexName(v string) *ReplicaGlobalSecondaryIndex {
+	s.IndexName = &v
+	return s
+}
+
+// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
+func (s *ReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndex {
+	s.ProvisionedThroughputOverride = v
+	return s
+}
+
+// Represents the auto scaling configuration for a replica global secondary
+// index.
+type ReplicaGlobalSecondaryIndexAutoScalingDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the global secondary index.
+	IndexName *string `min:"3" type:"string"`
+
+	// The current state of the replica global secondary index:
+	//
+	// * CREATING - The index is being created.
+	//
+	// * UPDATING - The table/index configuration is being updated. The table/index
+	// remains available for data operations when UPDATING.
+	//
+	// * DELETING - The index is being deleted.
+	//
+	// * ACTIVE - The index is ready for use.
+	IndexStatus *string `type:"string" enum:"IndexStatus"`
+
+	// Represents the auto scaling settings for a global table or global secondary
+	// index.
+	ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+	// Represents the auto scaling settings for a global table or global secondary
+	// index.
+	ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) GoString() string {
+	return s.String()
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription {
+	s.IndexName = &v
+	return s
+}
+
+// SetIndexStatus sets the IndexStatus field's value.
+func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription {
+	s.IndexStatus = &v
+	return s
+}
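+
+// NOTE (editor's illustrative sketch, not generated code): Validate runs
+// entirely client-side, so malformed input is rejected before a request is
+// ever signed or sent. For example, a two-character index name fails the
+// min-length rule declared in the struct tags above:
+func exampleValidateIndexName() error {
+	idx := (&ReplicaGlobalSecondaryIndex{}).SetIndexName("ab")
+	// Returns a request.ErrInvalidParams naming the IndexName minimum-length
+	// violation; a nil IndexName would instead trip the required-field check.
+	return idx.Validate()
+}
+
+// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value.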
+func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription { + s.ProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription { + s.ProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// Represents the auto scaling settings of a global secondary index for a replica +// that will be modified. +type ReplicaGlobalSecondaryIndexAutoScalingUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdate"} + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedReadCapacityAutoScalingUpdate != nil { + if err := s.ProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingUpdate sets the ProvisionedReadCapacityAutoScalingUpdate field's value. +func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexAutoScalingUpdate { + s.ProvisionedReadCapacityAutoScalingUpdate = v + return s +} + +// Represents the properties of a replica global secondary index. +type ReplicaGlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // If not described, uses the source table GSI's read capacity settings. 
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexDescription) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexDescription { + s.IndexName = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *ReplicaGlobalSecondaryIndexDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { + s.ProvisionedThroughputOverride = v + return s +} + +// Represents the properties of a global secondary index. +type ReplicaGlobalSecondaryIndexSettingsDescription struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The current status of the global secondary index: + // + // * CREATING - The global secondary index is being created. + // + // * UPDATING - The global secondary index is being updated. + // + // * DELETING - The global secondary index is being deleted. + // + // * ACTIVE - The global secondary index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // Auto scaling settings for a global secondary index replica's read capacity + // units. + ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. + ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` + + // Auto scaling settings for a global secondary index replica's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.IndexName = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.IndexStatus = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedReadCapacityUnits = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedWriteCapacityUnits = &v + return s +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type ReplicaGlobalSecondaryIndexSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Auto scaling settings for managing a global secondary index replica's read + // capacity units. + ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. + ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"}
+	if s.IndexName == nil {
+		invalidParams.Add(request.NewErrParamRequired("IndexName"))
+	}
+	if s.IndexName != nil && len(*s.IndexName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+	}
+	if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1))
+	}
+	if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+		if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+			invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsUpdate {
+	s.IndexName = &v
+	return s
+}
+
+// SetProvisionedReadCapacityAutoScalingSettingsUpdate sets the ProvisionedReadCapacityAutoScalingSettingsUpdate field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexSettingsUpdate {
+	s.ProvisionedReadCapacityAutoScalingSettingsUpdate = v
+	return s
+}
+
+// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsUpdate {
+	s.ProvisionedReadCapacityUnits = &v
+	return s
+}
+
+// The specified replica is no longer part of the global table.
+type ReplicaNotFoundException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorReplicaNotFoundException(v protocol.ResponseMetadata) error {
+	return &ReplicaNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ReplicaNotFoundException) Code() string {
+	return "ReplicaNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ReplicaNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ReplicaNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ReplicaNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
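+
+// NOTE (editor's illustrative sketch, not generated code): together with
+// StatusCode and RequestID below, the generated methods expose everything a
+// caller needs to log a service exception, e.g.:
+func describeReplicaNotFound(e *ReplicaNotFoundException) string {
+	// Code, Message, StatusCode and RequestID are all derived from the
+	// response metadata captured when the error was unmarshaled.
+	return fmt.Sprintf("%s (HTTP %d, request %s): %s",
+		e.Code(), e.StatusCode(), e.RequestID(), e.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.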
+func (s *ReplicaNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReplicaNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the properties of a replica. +type ReplicaSettingsDescription struct { + _ struct{} `type:"structure"` + + // The Region name of the replica. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // The read/write capacity mode of the replica. + ReplicaBillingModeSummary *BillingModeSummary `type:"structure"` + + // Replica global secondary index settings for the global table. + ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"` + + // Auto scaling settings for a global table replica's read capacity units. + ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedReadCapacityUnits *int64 `type:"long"` + + // Auto scaling settings for a global table replica's write capacity units. + ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"` + + // The current state of the Region: + // + // * CREATING - The Region is being created. + // + // * UPDATING - The Region is being updated. + // + // * DELETING - The Region is being deleted. + // + // * ACTIVE - The Region is ready for use. + ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` + + // Contains details of the table class. + ReplicaTableClassSummary *TableClassSummary `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsDescription) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaSettingsDescription) SetRegionName(v string) *ReplicaSettingsDescription { + s.RegionName = &v + return s +} + +// SetReplicaBillingModeSummary sets the ReplicaBillingModeSummary field's value. 
+func (s *ReplicaSettingsDescription) SetReplicaBillingModeSummary(v *BillingModeSummary) *ReplicaSettingsDescription { + s.ReplicaBillingModeSummary = v + return s +} + +// SetReplicaGlobalSecondaryIndexSettings sets the ReplicaGlobalSecondaryIndexSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaGlobalSecondaryIndexSettings(v []*ReplicaGlobalSecondaryIndexSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaGlobalSecondaryIndexSettings = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsDescription { + s.ReplicaProvisionedReadCapacityUnits = &v + return s +} + +// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedWriteCapacityUnits sets the ReplicaProvisionedWriteCapacityUnits field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityUnits(v int64) *ReplicaSettingsDescription { + s.ReplicaProvisionedWriteCapacityUnits = &v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettingsDescription { + s.ReplicaStatus = &v + return s +} + +// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value. +func (s *ReplicaSettingsDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaSettingsDescription { + s.ReplicaTableClassSummary = v + return s +} + +// Represents the settings for a global table in a Region that will be modified. +type ReplicaSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The Region of the replica to be added. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Represents the settings of a global secondary index for a global table that + // will be modified. + ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` + + // Auto scaling settings for managing a global table replica's read capacity + // units. + ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` + + // Replica-specific table class. If not specified, uses the source table's table + // class. 
+ ReplicaTableClass *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaSettingsUpdate"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1)) + } + if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1)) + } + if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil { + for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaSettingsUpdate) SetRegionName(v string) *ReplicaSettingsUpdate { + s.RegionName = &v + return s +} + +// SetReplicaGlobalSecondaryIndexSettingsUpdate sets the ReplicaGlobalSecondaryIndexSettingsUpdate field's value. +func (s *ReplicaSettingsUpdate) SetReplicaGlobalSecondaryIndexSettingsUpdate(v []*ReplicaGlobalSecondaryIndexSettingsUpdate) *ReplicaSettingsUpdate { + s.ReplicaGlobalSecondaryIndexSettingsUpdate = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate sets the ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate field's value. +func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaSettingsUpdate { + s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. +func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsUpdate { + s.ReplicaProvisionedReadCapacityUnits = &v + return s +} + +// SetReplicaTableClass sets the ReplicaTableClass field's value. 
+func (s *ReplicaSettingsUpdate) SetReplicaTableClass(v string) *ReplicaSettingsUpdate {
+	s.ReplicaTableClass = &v
+	return s
+}
+
+// Represents one of the following:
+//
+// - A new replica to be added to an existing global table.
+//
+// - New parameters for an existing replica.
+//
+// - An existing replica to be removed from an existing global table.
+type ReplicaUpdate struct {
+	_ struct{} `type:"structure"`
+
+	// The parameters required for creating a replica on an existing global table.
+	Create *CreateReplicaAction `type:"structure"`
+
+	// The name of the existing replica to be removed.
+	Delete *DeleteReplicaAction `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaUpdate) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaUpdate) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaUpdate) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicaUpdate"}
+	if s.Create != nil {
+		if err := s.Create.Validate(); err != nil {
+			invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Delete != nil {
+		if err := s.Delete.Validate(); err != nil {
+			invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCreate sets the Create field's value.
+func (s *ReplicaUpdate) SetCreate(v *CreateReplicaAction) *ReplicaUpdate {
+	s.Create = v
+	return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate {
+	s.Delete = v
+	return s
+}
+
+// Represents one of the following:
+//
+// - A new replica to be added to an existing regional table or global table.
+// This request invokes the CreateTableReplica action in the destination
+// Region.
+//
+// - New parameters for an existing replica. This request invokes the UpdateTable
+// action in the destination Region.
+//
+// - An existing replica to be deleted. The request invokes the DeleteTableReplica
+// action in the destination Region, deleting the replica and all of its
+// items in the destination Region.
+//
+// When you manually remove a table or global table replica, you do not automatically
+// remove any associated scalable targets, scaling policies, or CloudWatch alarms.
+type ReplicationGroupUpdate struct {
+	_ struct{} `type:"structure"`
+
+	// The parameters required for creating a replica for the table.
+	Create *CreateReplicationGroupMemberAction `type:"structure"`
+
+	// The parameters required for deleting a replica for the table.
+	Delete *DeleteReplicationGroupMemberAction `type:"structure"`
+
+	// The parameters required for updating a replica for the table.
+	Update *UpdateReplicationGroupMemberAction `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationGroupUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationGroupUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationGroupUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationGroupUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *ReplicationGroupUpdate) SetCreate(v *CreateReplicationGroupMemberAction) *ReplicationGroupUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *ReplicationGroupUpdate) SetDelete(v *DeleteReplicationGroupMemberAction) *ReplicationGroupUpdate { + s.Delete = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction) *ReplicationGroupUpdate { + s.Update = v + return s +} + +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +type RequestLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestLimitExceeded) GoString() string { + return s.String() +} + +func newErrorRequestLimitExceeded(v protocol.ResponseMetadata) error { + return &RequestLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *RequestLimitExceeded) Code() string { + return "RequestLimitExceeded" +} + +// Message returns the exception's message. +func (s *RequestLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *RequestLimitExceeded) OrigErr() error {
+	return nil
+}
+
+func (s *RequestLimitExceeded) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *RequestLimitExceeded) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *RequestLimitExceeded) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+type ResourceInUseException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// The resource that you attempted to change is in use.
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceInUseException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceInUseException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceInUseException(v protocol.ResponseMetadata) error {
+	return &ResourceInUseException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceInUseException) Code() string {
+	return "ResourceInUseException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceInUseException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceInUseException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceInUseException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceInUseException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+type ResourceNotFoundException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// The requested resource does not exist.
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
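+
+// NOTE (editor's illustrative sketch, not generated code): these two
+// exceptions usually warrant different handling. ResourceInUseException is
+// often transient (for example, the table is still CREATING), while
+// ResourceNotFoundException will not resolve by retrying alone:
+func retryableTableError(err error) bool {
+	switch err.(type) {
+	case *ResourceInUseException:
+		// The table may simply not be ACTIVE yet; backing off and retrying
+		// is a reasonable strategy.
+		return true
+	default:
+		// ResourceNotFoundException and anything else are left to the caller.
+		return false
+	}
+}
+
+// GoString returns the string representation.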
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+	return &ResourceNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+	return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Contains details for the restore.
+type RestoreSummary struct {
+	_ struct{} `type:"structure"`
+
+	// Point in time or source backup time.
+	//
+	// RestoreDateTime is a required field
+	RestoreDateTime *time.Time `type:"timestamp" required:"true"`
+
+	// Indicates whether a restore is in progress.
+	//
+	// RestoreInProgress is a required field
+	RestoreInProgress *bool `type:"boolean" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the backup from which the table was restored.
+	SourceBackupArn *string `min:"37" type:"string"`
+
+	// The ARN of the source table of the backup that is being restored.
+	SourceTableArn *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreSummary) GoString() string {
+	return s.String()
+}
+
+// SetRestoreDateTime sets the RestoreDateTime field's value.
+func (s *RestoreSummary) SetRestoreDateTime(v time.Time) *RestoreSummary {
+	s.RestoreDateTime = &v
+	return s
+}
+
+// SetRestoreInProgress sets the RestoreInProgress field's value.
+func (s *RestoreSummary) SetRestoreInProgress(v bool) *RestoreSummary {
+	s.RestoreInProgress = &v
+	return s
+}
+
+// SetSourceBackupArn sets the SourceBackupArn field's value.
+func (s *RestoreSummary) SetSourceBackupArn(v string) *RestoreSummary {
+	s.SourceBackupArn = &v
+	return s
+}
+
+// SetSourceTableArn sets the SourceTableArn field's value.
+func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary {
+	s.SourceTableArn = &v
+	return s
+}
+
+type RestoreTableFromBackupInput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) associated with the backup. 
+ // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` + + // The billing mode of the restored table. + BillingModeOverride *string `type:"string" enum:"BillingMode"` + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + + // Provisioned throughput settings for the restored table. + ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` + + // The new server-side encryption settings for the restored table. + SSESpecificationOverride *SSESpecification `type:"structure"` + + // The name of the new table to which the backup must be restored. + // + // TargetTableName is a required field + TargetTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreTableFromBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + if s.TargetTableName == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTableName")) + } + if s.TargetTableName != nil && len(*s.TargetTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3)) + } + if s.GlobalSecondaryIndexOverride != nil { + for i, v := range s.GlobalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexOverride != nil { + for i, v := range s.LocalSecondaryIndexOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. 
+func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBackupInput { + s.BackupArn = &v + return s +} + +// SetBillingModeOverride sets the BillingModeOverride field's value. +func (s *RestoreTableFromBackupInput) SetBillingModeOverride(v string) *RestoreTableFromBackupInput { + s.BillingModeOverride = &v + return s +} + +// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value. +func (s *RestoreTableFromBackupInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableFromBackupInput { + s.GlobalSecondaryIndexOverride = v + return s +} + +// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. +func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableFromBackupInput { + s.LocalSecondaryIndexOverride = v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput { + s.ProvisionedThroughputOverride = v + return s +} + +// SetSSESpecificationOverride sets the SSESpecificationOverride field's value. +func (s *RestoreTableFromBackupInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableFromBackupInput { + s.SSESpecificationOverride = v + return s +} + +// SetTargetTableName sets the TargetTableName field's value. +func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput { + s.TargetTableName = &v + return s +} + +type RestoreTableFromBackupOutput struct { + _ struct{} `type:"structure"` + + // The description of the table created from an existing backup. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableFromBackupOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) *RestoreTableFromBackupOutput { + s.TableDescription = v + return s +} + +type RestoreTableToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // The billing mode of the restored table. + BillingModeOverride *string `type:"string" enum:"BillingMode"` + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or + // all of the indexes at the time of restore. + LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` + + // Provisioned throughput settings for the restored table. 
+ ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"`
+
+ // Time in the past to restore the table to.
+ RestoreDateTime *time.Time `type:"timestamp"`
+
+ // The new server-side encryption settings for the restored table.
+ SSESpecificationOverride *SSESpecification `type:"structure"`
+
+ // The DynamoDB table that will be restored. This value is an Amazon Resource
+ // Name (ARN).
+ SourceTableArn *string `type:"string"`
+
+ // Name of the source table that is being restored.
+ SourceTableName *string `min:"3" type:"string"`
+
+ // The name of the new table to which it must be restored.
+ //
+ // TargetTableName is a required field
+ TargetTableName *string `min:"3" type:"string" required:"true"`
+
+ // Restore the table to the latest possible time. LatestRestorableDateTime is
+ // typically 5 minutes before the current time.
+ UseLatestRestorableTime *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreTableToPointInTimeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RestoreTableToPointInTimeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableToPointInTimeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"}
+ if s.SourceTableName != nil && len(*s.SourceTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3))
+ }
+ if s.TargetTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
+ }
+ if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
+ }
+ if s.GlobalSecondaryIndexOverride != nil {
+ for i, v := range s.GlobalSecondaryIndexOverride {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.LocalSecondaryIndexOverride != nil {
+ for i, v := range s.LocalSecondaryIndexOverride {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ProvisionedThroughputOverride != nil {
+ if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBillingModeOverride sets the BillingModeOverride field's value.
+func (s *RestoreTableToPointInTimeInput) SetBillingModeOverride(v string) *RestoreTableToPointInTimeInput {
+ s.BillingModeOverride = &v
+ return s
+}
+
+// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value.
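+//
+// Illustrative sketch (table names are hypothetical): the index and
+// throughput overrides combine with UseLatestRestorableTime when building a
+// point-in-time restore request:
+//
+//	input := (&dynamodb.RestoreTableToPointInTimeInput{}).
+//		SetSourceTableName("Music").
+//		SetTargetTableName("MusicRestored").
+//		SetUseLatestRestorableTime(true)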
+func (s *RestoreTableToPointInTimeInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableToPointInTimeInput { + s.GlobalSecondaryIndexOverride = v + return s +} + +// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableToPointInTimeInput { + s.LocalSecondaryIndexOverride = v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRestoreDateTime sets the RestoreDateTime field's value. +func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput { + s.RestoreDateTime = &v + return s +} + +// SetSSESpecificationOverride sets the SSESpecificationOverride field's value. +func (s *RestoreTableToPointInTimeInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableToPointInTimeInput { + s.SSESpecificationOverride = v + return s +} + +// SetSourceTableArn sets the SourceTableArn field's value. +func (s *RestoreTableToPointInTimeInput) SetSourceTableArn(v string) *RestoreTableToPointInTimeInput { + s.SourceTableArn = &v + return s +} + +// SetSourceTableName sets the SourceTableName field's value. +func (s *RestoreTableToPointInTimeInput) SetSourceTableName(v string) *RestoreTableToPointInTimeInput { + s.SourceTableName = &v + return s +} + +// SetTargetTableName sets the TargetTableName field's value. +func (s *RestoreTableToPointInTimeInput) SetTargetTableName(v string) *RestoreTableToPointInTimeInput { + s.TargetTableName = &v + return s +} + +// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value. +func (s *RestoreTableToPointInTimeInput) SetUseLatestRestorableTime(v bool) *RestoreTableToPointInTimeInput { + s.UseLatestRestorableTime = &v + return s +} + +type RestoreTableToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreTableToPointInTimeOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescription) *RestoreTableToPointInTimeOutput { + s.TableDescription = v + return s +} + +// The S3 bucket that is being imported from. +type S3BucketSource struct { + _ struct{} `type:"structure"` + + // The S3 bucket that is being imported from. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The account number of the S3 bucket that is being imported from. If the bucket + // is owned by the requester this is optional. 
+ S3BucketOwner *string `type:"string"` + + // The key prefix shared by all S3 Objects that are being imported. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3BucketSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3BucketSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3BucketSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3BucketSource"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *S3BucketSource) SetS3Bucket(v string) *S3BucketSource { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *S3BucketSource) SetS3BucketOwner(v string) *S3BucketSource { + s.S3BucketOwner = &v + return s +} + +// SetS3KeyPrefix sets the S3KeyPrefix field's value. +func (s *S3BucketSource) SetS3KeyPrefix(v string) *S3BucketSource { + s.S3KeyPrefix = &v + return s +} + +// The description of the server-side encryption status on the specified table. +type SSEDescription struct { + _ struct{} `type:"structure"` + + // Indicates the time, in UNIX epoch date format, when DynamoDB detected that + // the table's KMS key was inaccessible. This attribute will automatically be + // cleared when DynamoDB detects that the table's KMS key is accessible again. + // DynamoDB will initiate the table archival process when table's KMS key remains + // inaccessible for more than seven days from this date. + InaccessibleEncryptionDateTime *time.Time `type:"timestamp"` + + // The KMS key ARN used for the KMS encryption. + KMSMasterKeyArn *string `type:"string"` + + // Server-side encryption type. The only supported value is: + // + // * KMS - Server-side encryption that uses Key Management Service. The key + // is stored in your account and is managed by KMS (KMS charges apply). + SSEType *string `type:"string" enum:"SSEType"` + + // Represents the current state of server-side encryption. The only supported + // values are: + // + // * ENABLED - Server-side encryption is enabled. + // + // * UPDATING - Server-side encryption is being updated. + Status *string `type:"string" enum:"SSEStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSEDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
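+//
+// Illustrative sketch: GoString backs the %#v verb, so a debug statement such
+// as the following prints the description with sensitive members redacted
+// (desc is a hypothetical *dynamodb.SSEDescription):
+//
+//	log.Printf("table SSE state: %#v", desc)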
+func (s SSEDescription) GoString() string { + return s.String() +} + +// SetInaccessibleEncryptionDateTime sets the InaccessibleEncryptionDateTime field's value. +func (s *SSEDescription) SetInaccessibleEncryptionDateTime(v time.Time) *SSEDescription { + s.InaccessibleEncryptionDateTime = &v + return s +} + +// SetKMSMasterKeyArn sets the KMSMasterKeyArn field's value. +func (s *SSEDescription) SetKMSMasterKeyArn(v string) *SSEDescription { + s.KMSMasterKeyArn = &v + return s +} + +// SetSSEType sets the SSEType field's value. +func (s *SSEDescription) SetSSEType(v string) *SSEDescription { + s.SSEType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SSEDescription) SetStatus(v string) *SSEDescription { + s.Status = &v + return s +} + +// Represents the settings used to enable server-side encryption. +type SSESpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether server-side encryption is done using an Amazon Web Services + // managed key or an Amazon Web Services owned key. If enabled (true), server-side + // encryption type is set to KMS and an Amazon Web Services managed key is used + // (KMS charges apply). If disabled (false) or not specified, server-side encryption + // is set to Amazon Web Services owned key. + Enabled *bool `type:"boolean"` + + // The KMS key that should be used for the KMS encryption. To specify a key, + // use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note + // that you should only provide this parameter if the key is different from + // the default DynamoDB key alias/aws/dynamodb. + KMSMasterKeyId *string `type:"string"` + + // Server-side encryption type. The only supported value is: + // + // * KMS - Server-side encryption that uses Key Management Service. The key + // is stored in your account and is managed by KMS (KMS charges apply). + SSEType *string `type:"string" enum:"SSEType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSESpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSESpecification) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *SSESpecification) SetEnabled(v bool) *SSESpecification { + s.Enabled = &v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *SSESpecification) SetKMSMasterKeyId(v string) *SSESpecification { + s.KMSMasterKeyId = &v + return s +} + +// SetSSEType sets the SSEType field's value. +func (s *SSESpecification) SetSSEType(v string) *SSESpecification { + s.SSEType = &v + return s +} + +// Represents the input of a Scan operation. +type ScanInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter. Use FilterExpression instead. 
For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // A Boolean value that determines the read consistency model during the scan: + // + // * If ConsistentRead is false, then the data returned from Scan might not + // contain the results from other recently completed write operations (PutItem, + // UpdateItem, or DeleteItem). + // + // * If ConsistentRead is true, then all of the write operations that completed + // before the Scan began are guaranteed to be contained in the Scan response. + // + // The default setting for ConsistentRead is false. + // + // The ConsistentRead parameter is not supported on global secondary indexes. + // If you scan a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify + // the same segment whose previous Scan returned the corresponding value of + // LastEvaluatedKey. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. 
For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Scan operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) + // in the Amazon DynamoDB Developer Guide. + FilterExpression *string `type:"string"` + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of + // a JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. 
+ ProjectionExpression *string `type:"string"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html) + // in the Amazon DynamoDB Developer Guide. + ScanFilter map[string]*Condition `type:"map"` + + // For a parallel Scan request, Segment identifies an individual segment to + // be scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. For example, + // if you want to use four application threads to scan a table or an index, + // then the first thread specifies a Segment value of 0, the second thread specifies + // 1, and so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must + // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than the + // value provided for TotalSegments. + // + // If you provide Segment, you must also provide TotalSegments. + Segment *int64 `type:"integer"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. + // + // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent + // to specifying ALL_ATTRIBUTES. + // + // * COUNT - Returns the number of matching items, rather than the matching + // items themselves. Note that this uses the same quantity of read capacity + // units as getting the items, and is subject to the same item size calculations. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. + // This return value is equivalent to specifying ProjectionExpression without + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation reads only the index and not the table. 
If any of the requested + // attributes are not projected into the local secondary index, DynamoDB + // fetches each of these attributes from the parent table. This extra fetching + // incurs additional throughput cost and latency. If you query or scan a + // global secondary index, you can only request attributes that are projected + // into the index. Global secondary index queries cannot fetch attributes + // from the parent table. + // + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying ProjectionExpression without any + // value for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items; or, if you provide + // IndexName, the name of the table to which that index belongs. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. The value of TotalSegments + // corresponds to the number of application workers that will perform the parallel + // scan. For example, if you want to use four application threads to scan a + // table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less + // than or equal to 1000000. If you specify a TotalSegments value of 1, the + // Scan operation will be sequential rather than parallel. + // + // If you specify TotalSegments, you must also specify Segment. + TotalSegments *int64 `min:"1" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
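+//
+// Illustrative sketch: Validate can be called directly to surface client-side
+// parameter problems before any request is sent:
+//
+//	input := &dynamodb.ScanInput{} // missing the required TableName
+//	if err := input.Validate(); err != nil {
+//		log.Println(err) // reports the missing TableName parameter
+//	}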
+func (s *ScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScanInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TotalSegments != nil && *s.TotalSegments < 1 { + invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) + } + if s.ScanFilter != nil { + for i, v := range s.ScanFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *ScanInput) SetAttributesToGet(v []*string) *ScanInput { + s.AttributesToGet = v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *ScanInput) SetConditionalOperator(v string) *ScanInput { + s.ConditionalOperator = &v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *ScanInput) SetConsistentRead(v bool) *ScanInput { + s.ConsistentRead = &v + return s +} + +// SetExclusiveStartKey sets the ExclusiveStartKey field's value. +func (s *ScanInput) SetExclusiveStartKey(v map[string]*AttributeValue) *ScanInput { + s.ExclusiveStartKey = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *ScanInput) SetExpressionAttributeNames(v map[string]*string) *ScanInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *ScanInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *ScanInput { + s.ExpressionAttributeValues = v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *ScanInput) SetFilterExpression(v string) *ScanInput { + s.FilterExpression = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *ScanInput) SetIndexName(v string) *ScanInput { + s.IndexName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ScanInput) SetLimit(v int64) *ScanInput { + s.Limit = &v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *ScanInput) SetProjectionExpression(v string) *ScanInput { + s.ProjectionExpression = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *ScanInput) SetReturnConsumedCapacity(v string) *ScanInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetScanFilter sets the ScanFilter field's value. +func (s *ScanInput) SetScanFilter(v map[string]*Condition) *ScanInput { + s.ScanFilter = v + return s +} + +// SetSegment sets the Segment field's value. +func (s *ScanInput) SetSegment(v int64) *ScanInput { + s.Segment = &v + return s +} + +// SetSelect sets the Select field's value. 
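+//
+// Illustrative sketch (table name is hypothetical): the generated Select
+// enum constants pair with this setter, for example to request only the
+// number of matching items:
+//
+//	input := (&dynamodb.ScanInput{}).
+//		SetTableName("Music").
+//		SetSelect(dynamodb.SelectCount)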
+func (s *ScanInput) SetSelect(v string) *ScanInput { + s.Select = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ScanInput) SetTableName(v string) *ScanInput { + s.TableName = &v + return s +} + +// SetTotalSegments sets the TotalSegments field's value. +func (s *ScanInput) SetTotalSegments(v int64) *ScanInput { + s.TotalSegments = &v + return s +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the Scan operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items returned + // after the filter was applied, and ScannedCount is the number of matching + // items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as ScannedCount. + Count *int64 `type:"integer"` + + // An array of item attributes that match the scan criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount + // value with few, or no, Count results indicates an inefficient Scan operation. + // For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
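+//
+// Illustrative pagination sketch (svc is a hypothetical, already-configured
+// *dynamodb.DynamoDB client): scan pages until LastEvaluatedKey comes back
+// empty:
+//
+//	input := (&dynamodb.ScanInput{}).SetTableName("Music")
+//	for {
+//		out, err := svc.Scan(input)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		// ... process out.Items ...
+//		if len(out.LastEvaluatedKey) == 0 {
+//			break
+//		}
+//		input.SetExclusiveStartKey(out.LastEvaluatedKey)
+//	}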
+func (s *ScanOutput) SetConsumedCapacity(v *ConsumedCapacity) *ScanOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetCount sets the Count field's value.
+func (s *ScanOutput) SetCount(v int64) *ScanOutput {
+ s.Count = &v
+ return s
+}
+
+// SetItems sets the Items field's value.
+func (s *ScanOutput) SetItems(v []map[string]*AttributeValue) *ScanOutput {
+ s.Items = v
+ return s
+}
+
+// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
+func (s *ScanOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ScanOutput {
+ s.LastEvaluatedKey = v
+ return s
+}
+
+// SetScannedCount sets the ScannedCount field's value.
+func (s *ScanOutput) SetScannedCount(v int64) *ScanOutput {
+ s.ScannedCount = &v
+ return s
+}
+
+// Contains the details of the table when the backup was created.
+type SourceTableDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
+ // using PROVISIONED for predictable workloads.
+ //
+ // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
+ // We recommend using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode *string `type:"string" enum:"BillingMode"`
+
+ // Number of items in the table. Note that this is an approximate value.
+ ItemCount *int64 `type:"long"`
+
+ // Schema of the table.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Read IOPS and Write IOPS on the table when the backup was created.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+
+ // ARN of the table for which the backup was created.
+ TableArn *string `type:"string"`
+
+ // Time when the source table was created.
+ //
+ // TableCreationDateTime is a required field
+ TableCreationDateTime *time.Time `type:"timestamp" required:"true"`
+
+ // Unique identifier for the table for which the backup was created.
+ //
+ // TableId is a required field
+ TableId *string `type:"string" required:"true"`
+
+ // The name of the table for which the backup was created.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // Size of the table in bytes. Note that this is an approximate value.
+ TableSizeBytes *int64 `type:"long"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SourceTableDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SourceTableDetails) GoString() string {
+ return s.String()
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *SourceTableDetails) SetBillingMode(v string) *SourceTableDetails {
+ s.BillingMode = &v
+ return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *SourceTableDetails) SetItemCount(v int64) *SourceTableDetails { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDetails { + s.KeySchema = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails { + s.ProvisionedThroughput = v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *SourceTableDetails) SetTableArn(v string) *SourceTableDetails { + s.TableArn = &v + return s +} + +// SetTableCreationDateTime sets the TableCreationDateTime field's value. +func (s *SourceTableDetails) SetTableCreationDateTime(v time.Time) *SourceTableDetails { + s.TableCreationDateTime = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *SourceTableDetails) SetTableId(v string) *SourceTableDetails { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *SourceTableDetails) SetTableName(v string) *SourceTableDetails { + s.TableName = &v + return s +} + +// SetTableSizeBytes sets the TableSizeBytes field's value. +func (s *SourceTableDetails) SetTableSizeBytes(v int64) *SourceTableDetails { + s.TableSizeBytes = &v + return s +} + +// Contains the details of the features enabled on the table when the backup +// was created. For example, LSIs, GSIs, streams, TTL. +type SourceTableFeatureDetails struct { + _ struct{} `type:"structure"` + + // Represents the GSI properties for the table when the backup was created. + // It includes the IndexName, KeySchema, Projection, and ProvisionedThroughput + // for the GSIs on the table at the time of backup. + GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"` + + // Represents the LSI properties for the table when the backup was created. + // It includes the IndexName, KeySchema and Projection for the LSIs on the table + // at the time of backup. + LocalSecondaryIndexes []*LocalSecondaryIndexInfo `type:"list"` + + // The description of the server-side encryption status on the table when the + // backup was created. + SSEDescription *SSEDescription `type:"structure"` + + // Stream settings on the table when the backup was created. + StreamDescription *StreamSpecification `type:"structure"` + + // Time to Live settings on the table when the backup was created. + TimeToLiveDescription *TimeToLiveDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceTableFeatureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceTableFeatureDetails) GoString() string { + return s.String() +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *SourceTableFeatureDetails) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexInfo) *SourceTableFeatureDetails { + s.GlobalSecondaryIndexes = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. 
+func (s *SourceTableFeatureDetails) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexInfo) *SourceTableFeatureDetails { + s.LocalSecondaryIndexes = v + return s +} + +// SetSSEDescription sets the SSEDescription field's value. +func (s *SourceTableFeatureDetails) SetSSEDescription(v *SSEDescription) *SourceTableFeatureDetails { + s.SSEDescription = v + return s +} + +// SetStreamDescription sets the StreamDescription field's value. +func (s *SourceTableFeatureDetails) SetStreamDescription(v *StreamSpecification) *SourceTableFeatureDetails { + s.StreamDescription = v + return s +} + +// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. +func (s *SourceTableFeatureDetails) SetTimeToLiveDescription(v *TimeToLiveDescription) *SourceTableFeatureDetails { + s.TimeToLiveDescription = v + return s +} + +// Represents the DynamoDB Streams configuration for a table in DynamoDB. +type StreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) + // on the table. + // + // StreamEnabled is a required field + StreamEnabled *bool `type:"boolean" required:"true"` + + // When an item in the table is modified, StreamViewType determines what information + // is written to the stream for this table. Valid values for StreamViewType + // are: + // + // * KEYS_ONLY - Only the key attributes of the modified item are written + // to the stream. + // + // * NEW_IMAGE - The entire item, as it appears after it was modified, is + // written to the stream. + // + // * OLD_IMAGE - The entire item, as it appeared before it was modified, + // is written to the stream. + // + // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StreamSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamSpecification"} + if s.StreamEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("StreamEnabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamEnabled sets the StreamEnabled field's value. +func (s *StreamSpecification) SetStreamEnabled(v bool) *StreamSpecification { + s.StreamEnabled = &v + return s +} + +// SetStreamViewType sets the StreamViewType field's value. +func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification { + s.StreamViewType = &v + return s +} + +// A target table with the specified name already exists. 
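+//
+// Illustrative sketch (svc and input are hypothetical): a caller restoring a
+// backup might detect this condition with errors.As:
+//
+//	if _, err := svc.RestoreTableFromBackup(input); err != nil {
+//		var exists *dynamodb.TableAlreadyExistsException
+//		if errors.As(err, &exists) {
+//			log.Printf("target table already exists: %s", exists.Message())
+//		}
+//	}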
+type TableAlreadyExistsException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableAlreadyExistsException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableAlreadyExistsException) GoString() string {
+ return s.String()
+}
+
+func newErrorTableAlreadyExistsException(v protocol.ResponseMetadata) error {
+ return &TableAlreadyExistsException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *TableAlreadyExistsException) Code() string {
+ return "TableAlreadyExistsException"
+}
+
+// Message returns the exception's message.
+func (s *TableAlreadyExistsException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil; it satisfies the awserr.Error interface.
+func (s *TableAlreadyExistsException) OrigErr() error {
+ return nil
+}
+
+func (s *TableAlreadyExistsException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TableAlreadyExistsException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *TableAlreadyExistsException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Represents the auto scaling configuration for a global table.
+type TableAutoScalingDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Represents replicas of the global table.
+ Replicas []*ReplicaAutoScalingDescription `type:"list"`
+
+ // The name of the table.
+ TableName *string `min:"3" type:"string"`
+
+ // The current state of the table:
+ //
+ // * CREATING - The table is being created.
+ //
+ // * UPDATING - The table is being updated.
+ //
+ // * DELETING - The table is being deleted.
+ //
+ // * ACTIVE - The table is ready for use.
+ TableStatus *string `type:"string" enum:"TableStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableAutoScalingDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableAutoScalingDescription) GoString() string {
+ return s.String()
+}
+
+// SetReplicas sets the Replicas field's value.
+func (s *TableAutoScalingDescription) SetReplicas(v []*ReplicaAutoScalingDescription) *TableAutoScalingDescription {
+ s.Replicas = v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *TableAutoScalingDescription) SetTableName(v string) *TableAutoScalingDescription {
+ s.TableName = &v
+ return s
+}
+
+// SetTableStatus sets the TableStatus field's value.
+func (s *TableAutoScalingDescription) SetTableStatus(v string) *TableAutoScalingDescription {
+ s.TableStatus = &v
+ return s
+}
+
+// Contains details of the table class.
+type TableClassSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time at which the table class was last updated.
+ LastUpdateDateTime *time.Time `type:"timestamp"`
+
+ // The table class of the specified table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
+ TableClass *string `type:"string" enum:"TableClass"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableClassSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableClassSummary) GoString() string {
+ return s.String()
+}
+
+// SetLastUpdateDateTime sets the LastUpdateDateTime field's value.
+func (s *TableClassSummary) SetLastUpdateDateTime(v time.Time) *TableClassSummary {
+ s.LastUpdateDateTime = &v
+ return s
+}
+
+// SetTableClass sets the TableClass field's value.
+func (s *TableClassSummary) SetTableClass(v string) *TableClassSummary {
+ s.TableClass = &v
+ return s
+}
+
+// The parameters for the table created as part of the import operation.
+type TableCreationParameters struct {
+ _ struct{} `type:"structure"`
+
+ // The attributes of the table created as part of the import operation.
+ //
+ // AttributeDefinitions is a required field
+ AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"`
+
+ // The billing mode for provisioning the table created as part of the import
+ // operation.
+ BillingMode *string `type:"string" enum:"BillingMode"`
+
+ // The Global Secondary Indexes (GSI) of the table to be created as part of
+ // the import operation.
+ GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"`
+
+ // The primary key and optional sort key of the table created as part of the
+ // import operation.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents the provisioned throughput settings for a specified table or index.
+ // The settings can be modified using the UpdateTable operation.
+ //
+ // For current minimum and maximum provisioned throughput values, see Service,
+ // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+
+ // Represents the settings used to enable server-side encryption.
+ SSESpecification *SSESpecification `type:"structure"`
+
+ // The name of the table created as part of the import operation.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output.
The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableCreationParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableCreationParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TableCreationParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableCreationParameters"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *TableCreationParameters) SetAttributeDefinitions(v []*AttributeDefinition) *TableCreationParameters { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *TableCreationParameters) SetBillingMode(v string) *TableCreationParameters { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *TableCreationParameters) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *TableCreationParameters { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCreationParameters { + s.KeySchema = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. 
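+//
+// Illustrative sketch (params is a hypothetical *dynamodb.TableCreationParameters):
+// enabling KMS server-side encryption for the imported table:
+//
+//	params.SetSSESpecification((&dynamodb.SSESpecification{}).
+//		SetEnabled(true).
+//		SetSSEType(dynamodb.SSETypeKms))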
+func (s *TableCreationParameters) SetSSESpecification(v *SSESpecification) *TableCreationParameters {
+	s.SSESpecification = v
+	return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *TableCreationParameters) SetTableName(v string) *TableCreationParameters {
+	s.TableName = &v
+	return s
+}
+
+// Represents the properties of a table.
+type TableDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Contains information about the table archive.
+	ArchivalSummary *ArchivalSummary `type:"structure"`
+
+	// An array of AttributeDefinition objects. Each of these objects describes
+	// one attribute in the table and index key schema.
+	//
+	// Each AttributeDefinition object in this array is composed of:
+	//
+	// * AttributeName - The name of the attribute.
+	//
+	// * AttributeType - The data type for the attribute.
+	AttributeDefinitions []*AttributeDefinition `type:"list"`
+
+	// Contains the details for the read/write capacity mode.
+	BillingModeSummary *BillingModeSummary `type:"structure"`
+
+	// The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)
+	// format.
+	CreationDateTime *time.Time `type:"timestamp"`
+
+	// Indicates whether deletion protection is enabled (true) or disabled (false)
+	// on the table.
+	DeletionProtectionEnabled *bool `type:"boolean"`
+
+	// The global secondary indexes, if any, on the table. Each index is scoped
+	// to a given partition key value. Each element is composed of:
+	//
+	// * Backfilling - If true, then the index is currently in the backfilling
+	// phase. Backfilling occurs only when a new global secondary index is added
+	// to the table. It is the process by which DynamoDB populates the new index
+	// with data from the table. (This attribute does not appear for indexes
+	// that were created during a CreateTable operation.) You can delete an index
+	// that is being created during the Backfilling phase when IndexStatus is
+	// set to CREATING and Backfilling is true. You can't delete the index that
+	// is being created when IndexStatus is set to CREATING and Backfilling is
+	// false.
+	//
+	// * IndexName - The name of the global secondary index.
+	//
+	// * IndexSizeBytes - The total size of the global secondary index, in bytes.
+	// DynamoDB updates this value approximately every six hours. Recent changes
+	// might not be reflected in this value.
+	//
+	// * IndexStatus - The current status of the global secondary index: CREATING
+	// - The index is being created. UPDATING - The index is being updated. DELETING
+	// - The index is being deleted. ACTIVE - The index is ready for use.
+	//
+	// * ItemCount - The number of items in the global secondary index. DynamoDB
+	// updates this value approximately every six hours. Recent changes might
+	// not be reflected in this value.
+	//
+	// * KeySchema - Specifies the complete index key schema. The attribute names
+	// in the key schema must be between 1 and 255 characters (inclusive). The
+	// key schema must begin with the same partition key as the table.
+	//
+	// * Projection - Specifies attributes that are copied (projected) from the
+	// table into the index. These are in addition to the primary key attributes
+	// and index key attributes, which are automatically projected.
Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - In addition to the attributes described in KEYS_ONLY, the secondary + // index will include other non-key attributes that you specify. ALL - All + // of the table attributes are projected into the index. NonKeyAttributes + // - A list of one or more non-key attribute names that are projected into + // the secondary index. The total count of attributes provided in NonKeyAttributes, + // summed across all of the secondary indexes, must not exceed 100. If you + // project the same attribute into two different indexes, this counts as + // two distinct attributes when determining the total. + // + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along + // with data about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` + + // Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) + // in use, if the table is replicated across Amazon Web Services Regions. + GlobalTableVersion *string `type:"string"` + + // The number of items in the specified table. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // * AttributeName - The name of the attribute. + // + // * KeyType - The role of the attribute: HASH - partition key RANGE - sort + // key The partition key of an item is also known as its hash attribute. + // The term "hash attribute" derives from DynamoDB's usage of an internal + // hash function to evenly distribute data items across partitions, based + // on their partition key values. The sort key of an item is also known as + // its range attribute. The term "range attribute" derives from the way DynamoDB + // stores items with the same partition key physically close together, in + // sorted order by the sort key value. + // + // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream + // for this table. + LatestStreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // * Amazon Web Services customer ID + // + // * Table name + // + // * StreamLabel + LatestStreamLabel *string `type:"string"` + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given partition key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of + // data within a given item collection cannot exceed 10 GB. 
Each element is + // composed of: + // + // * IndexName - The name of the local secondary index. + // + // * KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The + // key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB + // updates this value approximately every six hours. Recent changes might + // not be reflected in this value. + // + // * ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be + // reflected in this value. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + + // The provisioned throughput settings for the table, consisting of read and + // write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` + + // Represents replicas of the table. + Replicas []*ReplicaDescription `type:"list"` + + // Contains details for the restore. + RestoreSummary *RestoreSummary `type:"structure"` + + // The description of the server-side encryption status on the specified table. + SSEDescription *SSEDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *StreamSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the table. + TableArn *string `type:"string"` + + // Contains details of the table class. + TableClassSummary *TableClassSummary `type:"structure"` + + // Unique identifier for the table for which the backup was created. + TableId *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The total size of the specified table, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + TableSizeBytes *int64 `type:"long"` + + // The current state of the table: + // + // * CREATING - The table is being created. + // + // * UPDATING - The table/index configuration is being updated. The table/index + // remains available for data operations when UPDATING. + // + // * DELETING - The table is being deleted. + // + // * ACTIVE - The table is ready for use. 
+	//
+	// * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the
+	// table is inaccessible. Table operations may fail due to failure to use
+	// the KMS key. DynamoDB will initiate the table archival process when a
+	// table's KMS key remains inaccessible for more than seven days.
+	//
+	// * ARCHIVING - The table is being archived. Operations are not allowed
+	// until archival is complete.
+	//
+	// * ARCHIVED - The table has been archived. See the ArchivalReason for more
+	// information.
+	TableStatus *string `type:"string" enum:"TableStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableDescription) GoString() string {
+	return s.String()
+}
+
+// SetArchivalSummary sets the ArchivalSummary field's value.
+func (s *TableDescription) SetArchivalSummary(v *ArchivalSummary) *TableDescription {
+	s.ArchivalSummary = v
+	return s
+}
+
+// SetAttributeDefinitions sets the AttributeDefinitions field's value.
+func (s *TableDescription) SetAttributeDefinitions(v []*AttributeDefinition) *TableDescription {
+	s.AttributeDefinitions = v
+	return s
+}
+
+// SetBillingModeSummary sets the BillingModeSummary field's value.
+func (s *TableDescription) SetBillingModeSummary(v *BillingModeSummary) *TableDescription {
+	s.BillingModeSummary = v
+	return s
+}
+
+// SetCreationDateTime sets the CreationDateTime field's value.
+func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription {
+	s.CreationDateTime = &v
+	return s
+}
+
+// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value.
+func (s *TableDescription) SetDeletionProtectionEnabled(v bool) *TableDescription {
+	s.DeletionProtectionEnabled = &v
+	return s
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
+func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription {
+	s.GlobalSecondaryIndexes = v
+	return s
+}
+
+// SetGlobalTableVersion sets the GlobalTableVersion field's value.
+func (s *TableDescription) SetGlobalTableVersion(v string) *TableDescription {
+	s.GlobalTableVersion = &v
+	return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *TableDescription) SetItemCount(v int64) *TableDescription {
+	s.ItemCount = &v
+	return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *TableDescription) SetKeySchema(v []*KeySchemaElement) *TableDescription {
+	s.KeySchema = v
+	return s
+}
+
+// SetLatestStreamArn sets the LatestStreamArn field's value.
+func (s *TableDescription) SetLatestStreamArn(v string) *TableDescription {
+	s.LatestStreamArn = &v
+	return s
+}
+
+// SetLatestStreamLabel sets the LatestStreamLabel field's value.
+func (s *TableDescription) SetLatestStreamLabel(v string) *TableDescription {
+	s.LatestStreamLabel = &v
+	return s
+}
+
+// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
+func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDescription) *TableDescription { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription { + s.ProvisionedThroughput = v + return s +} + +// SetReplicas sets the Replicas field's value. +func (s *TableDescription) SetReplicas(v []*ReplicaDescription) *TableDescription { + s.Replicas = v + return s +} + +// SetRestoreSummary sets the RestoreSummary field's value. +func (s *TableDescription) SetRestoreSummary(v *RestoreSummary) *TableDescription { + s.RestoreSummary = v + return s +} + +// SetSSEDescription sets the SSEDescription field's value. +func (s *TableDescription) SetSSEDescription(v *SSEDescription) *TableDescription { + s.SSEDescription = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *TableDescription) SetStreamSpecification(v *StreamSpecification) *TableDescription { + s.StreamSpecification = v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *TableDescription) SetTableArn(v string) *TableDescription { + s.TableArn = &v + return s +} + +// SetTableClassSummary sets the TableClassSummary field's value. +func (s *TableDescription) SetTableClassSummary(v *TableClassSummary) *TableDescription { + s.TableClassSummary = v + return s +} + +// SetTableId sets the TableId field's value. +func (s *TableDescription) SetTableId(v string) *TableDescription { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *TableDescription) SetTableName(v string) *TableDescription { + s.TableName = &v + return s +} + +// SetTableSizeBytes sets the TableSizeBytes field's value. +func (s *TableDescription) SetTableSizeBytes(v int64) *TableDescription { + s.TableSizeBytes = &v + return s +} + +// SetTableStatus sets the TableStatus field's value. +func (s *TableDescription) SetTableStatus(v string) *TableDescription { + s.TableStatus = &v + return s +} + +// A target table with the specified name is either being created or deleted. +type TableInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableInUseException) GoString() string { + return s.String() +} + +func newErrorTableInUseException(v protocol.ResponseMetadata) error { + return &TableInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TableInUseException) Code() string { + return "TableInUseException" +} + +// Message returns the exception's message. +func (s *TableInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *TableInUseException) OrigErr() error {
+	return nil
+}
+
+func (s *TableInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TableInUseException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TableInUseException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// A source table with the name TableName does not currently exist within the
+// subscriber's account or the subscriber is operating in the wrong Amazon Web
+// Services Region.
+type TableNotFoundException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TableNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorTableNotFoundException(v protocol.ResponseMetadata) error {
+	return &TableNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *TableNotFoundException) Code() string {
+	return "TableNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *TableNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TableNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *TableNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TableNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TableNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to
+// a single DynamoDB table.
+//
+// Amazon Web Services-assigned tag names and values are automatically assigned
+// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned
+// tag names do not count towards the tag limit of 50. User-assigned tag names
+// have the prefix user: in the Cost Allocation Report. You cannot backdate
+// the application of a tag.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// The key of the tag. Tag keys are case-sensitive. Each DynamoDB table can
+	// only have up to one tag with the same key. If you try to add an existing
+	// tag (same key), the existing tag value will be updated to the new value.
+ // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. Tag values are case-sensitive and can be null. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // Identifies the Amazon DynamoDB resource to which tags should be added. This + // value is an Amazon Resource Name (ARN). + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // The tags to be assigned to the Amazon DynamoDB resource. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
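+//
+// A hedged sketch of pre-flight validation (the ARN and tag values are
+// assumptions):
+//
+//	input := (&dynamodb.TagResourceInput{}).
+//		SetResourceArn("arn:aws:dynamodb:us-east-1:123456789012:table/Orders").
+//		SetTags([]*dynamodb.Tag{(&dynamodb.Tag{}).SetKey("environment").SetValue("production")})
+//	if err := input.Validate(); err != nil {
+//		// surface the client-side error instead of calling the service
+//	}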
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The description of the Time to Live (TTL) status on the specified table. +type TimeToLiveDescription struct { + _ struct{} `type:"structure"` + + // The name of the TTL attribute for items in the table. + AttributeName *string `min:"1" type:"string"` + + // The TTL status for the table. + TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveDescription) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *TimeToLiveDescription) SetAttributeName(v string) *TimeToLiveDescription { + s.AttributeName = &v + return s +} + +// SetTimeToLiveStatus sets the TimeToLiveStatus field's value. +func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescription { + s.TimeToLiveStatus = &v + return s +} + +// Represents the settings used to enable or disable Time to Live (TTL) for +// the specified table. +type TimeToLiveSpecification struct { + _ struct{} `type:"structure"` + + // The name of the TTL attribute used to store the expiration time for items + // in the table. 
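+	//
+	// A hedged sketch of enabling TTL (the attribute name "ExpiresAt" is an
+	// assumption):
+	//
+	//	ttl := (&dynamodb.TimeToLiveSpecification{}).
+	//		SetAttributeName("ExpiresAt").
+	//		SetEnabled(true)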
+ // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // Indicates whether TTL is to be enabled (true) or disabled (false) on the + // table. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeToLiveSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimeToLiveSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimeToLiveSpecification"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *TimeToLiveSpecification) SetAttributeName(v string) *TimeToLiveSpecification { + s.AttributeName = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *TimeToLiveSpecification) SetEnabled(v bool) *TimeToLiveSpecification { + s.Enabled = &v + return s +} + +// Specifies an item to be retrieved as part of the transaction. +type TransactGetItem struct { + _ struct{} `type:"structure"` + + // Contains the primary key that identifies the item to get, together with the + // name of the table that contains the item, and optionally the specific attributes + // of the item to retrieve. + // + // Get is a required field + Get *Get `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactGetItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactGetItem"} + if s.Get == nil { + invalidParams.Add(request.NewErrParamRequired("Get")) + } + if s.Get != nil { + if err := s.Get.Validate(); err != nil { + invalidParams.AddNested("Get", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGet sets the Get field's value. 
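+//
+// As a hedged sketch of composing a transactional get (table name and key are
+// assumptions):
+//
+//	item := (&dynamodb.TransactGetItem{}).SetGet((&dynamodb.Get{}).
+//		SetTableName("Orders").
+//		SetKey(map[string]*dynamodb.AttributeValue{
+//			"OrderId": {S: aws.String("1234")},
+//		}))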
+func (s *TransactGetItem) SetGet(v *Get) *TransactGetItem { + s.Get = v + return s +} + +type TransactGetItemsInput struct { + _ struct{} `type:"structure"` + + // A value of TOTAL causes consumed capacity information to be returned, and + // a value of NONE prevents that information from being returned. No other value + // is valid. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // An ordered array of up to 100 TransactGetItem objects, each of which contains + // a Get structure. + // + // TransactItems is a required field + TransactItems []*TransactGetItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactGetItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactGetItemsInput"} + if s.TransactItems == nil { + invalidParams.Add(request.NewErrParamRequired("TransactItems")) + } + if s.TransactItems != nil && len(s.TransactItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) + } + if s.TransactItems != nil { + for i, v := range s.TransactItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *TransactGetItemsInput) SetReturnConsumedCapacity(v string) *TransactGetItemsInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTransactItems sets the TransactItems field's value. +func (s *TransactGetItemsInput) SetTransactItems(v []*TransactGetItem) *TransactGetItemsInput { + s.TransactItems = v + return s +} + +type TransactGetItemsOutput struct { + _ struct{} `type:"structure"` + + // If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity + // objects, one for each table addressed by TransactGetItem objects in the TransactItems + // parameter. These ConsumedCapacity objects report the read-capacity units + // consumed by the TransactGetItems call in that table. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // An ordered array of up to 100 ItemResponse objects, each of which corresponds + // to the TransactGetItem object in the same position in the TransactItems array. + // Each ItemResponse object contains a Map of the name-value pairs that are + // the projected attributes of the requested item. + // + // If a requested item could not be retrieved, the corresponding ItemResponse + // object is Null, or if the requested item has no projected attributes, the + // corresponding ItemResponse object is an empty Map. + Responses []*ItemResponse `min:"1" type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactGetItemsOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *TransactGetItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactGetItemsOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *TransactGetItemsOutput) SetResponses(v []*ItemResponse) *TransactGetItemsOutput { + s.Responses = v + return s +} + +// A list of requests that can perform update, put, delete, or check operations +// on multiple items in one or more tables atomically. +type TransactWriteItem struct { + _ struct{} `type:"structure"` + + // A request to perform a check item operation. + ConditionCheck *ConditionCheck `type:"structure"` + + // A request to perform a DeleteItem operation. + Delete *Delete `type:"structure"` + + // A request to perform a PutItem operation. + Put *Put `type:"structure"` + + // A request to perform an UpdateItem operation. + Update *Update `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactWriteItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactWriteItem"} + if s.ConditionCheck != nil { + if err := s.ConditionCheck.Validate(); err != nil { + invalidParams.AddNested("ConditionCheck", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Put != nil { + if err := s.Put.Validate(); err != nil { + invalidParams.AddNested("Put", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionCheck sets the ConditionCheck field's value. +func (s *TransactWriteItem) SetConditionCheck(v *ConditionCheck) *TransactWriteItem { + s.ConditionCheck = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *TransactWriteItem) SetDelete(v *Delete) *TransactWriteItem { + s.Delete = v + return s +} + +// SetPut sets the Put field's value. 
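+//
+// As a hedged sketch of a put-only transaction item (table name and item
+// contents are assumptions):
+//
+//	write := (&dynamodb.TransactWriteItem{}).SetPut((&dynamodb.Put{}).
+//		SetTableName("Orders").
+//		SetItem(map[string]*dynamodb.AttributeValue{
+//			"OrderId": {S: aws.String("1234")},
+//			"Status":  {S: aws.String("NEW")},
+//		}))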
+func (s *TransactWriteItem) SetPut(v *Put) *TransactWriteItem { + s.Put = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *TransactWriteItem) SetUpdate(v *Update) *TransactWriteItem { + s.Update = v + return s +} + +type TransactWriteItemsInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, + // meaning that multiple identical calls have the same effect as one single + // call. + // + // Although multiple identical calls using the same client request token produce + // the same result on the server (no side effects), the responses to the calls + // might not be the same. If the ReturnConsumedCapacity parameter is set, then + // the initial TransactWriteItems call returns the amount of write capacity + // units consumed in making the changes. Subsequent TransactWriteItems calls + // with the same client token return the number of read capacity units consumed + // in reading the item. + // + // A client request token is valid for 10 minutes after the first request that + // uses it is completed. After 10 minutes, any request with the same client + // token is treated as a new request. Do not resubmit the same request with + // the same client token for more than 10 minutes, or the result might not be + // idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch + // exception. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections (if any), that were + // modified during the operation and are returned in the response. If set to + // NONE (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // An ordered array of up to 100 TransactWriteItem objects, each of which contains + // a ConditionCheck, Put, Update, or Delete object. These can operate on items + // in different tables, but the tables must reside in the same Amazon Web Services + // account and Region, and no two of them can operate on the same item. + // + // TransactItems is a required field + TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TransactWriteItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactWriteItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactWriteItemsInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.TransactItems == nil { + invalidParams.Add(request.NewErrParamRequired("TransactItems")) + } + if s.TransactItems != nil && len(s.TransactItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) + } + if s.TransactItems != nil { + for i, v := range s.TransactItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *TransactWriteItemsInput) SetClientRequestToken(v string) *TransactWriteItemsInput { + s.ClientRequestToken = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *TransactWriteItemsInput) SetReturnConsumedCapacity(v string) *TransactWriteItemsInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *TransactWriteItemsInput) SetReturnItemCollectionMetrics(v string) *TransactWriteItemsInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetTransactItems sets the TransactItems field's value. +func (s *TransactWriteItemsInput) SetTransactItems(v []*TransactWriteItem) *TransactWriteItemsInput { + s.TransactItems = v + return s +} + +type TransactWriteItemsOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire TransactWriteItems operation. The + // values of the list are ordered according to the ordering of the TransactItems + // request parameter. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A list of tables that were processed by TransactWriteItems and, for each + // table, information about any item collections that were affected by individual + // UpdateItem, PutItem, or DeleteItem operations. + ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TransactWriteItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TransactWriteItemsOutput) GoString() string {
+	return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *TransactWriteItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactWriteItemsOutput {
+	s.ConsumedCapacity = v
+	return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *TransactWriteItemsOutput {
+	s.ItemCollectionMetrics = v
+	return s
+}
+
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - A table in the TransactWriteItems request is in a different account
+// or Region.
+//
+// - More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// - A table in the TransactGetItems request is in a different account or
+// Region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of the requested items; if an item has no
+// error, it will have None code and Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors: Code: None Message: null
+//
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when the provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when the provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+type TransactionCanceledException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// A list of cancellation reasons.
+	CancellationReasons []*CancellationReason `min:"1" type:"list"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TransactionCanceledException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TransactionCanceledException) GoString() string {
+	return s.String()
+}
+
+func newErrorTransactionCanceledException(v protocol.ResponseMetadata) error {
+	return &TransactionCanceledException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *TransactionCanceledException) Code() string {
+	return "TransactionCanceledException"
+}
+
+// Message returns the exception's message.
+func (s *TransactionCanceledException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TransactionCanceledException) OrigErr() error {
+	return nil
+}
+
+func (s *TransactionCanceledException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TransactionCanceledException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TransactionCanceledException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Operation was rejected because there is an ongoing transaction for the item.
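+//
+// A hedged handling sketch (ErrCodeTransactionConflictException is the
+// error-code constant this package defines for the Code() value below; the
+// retry decision itself is an assumption):
+//
+//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodeTransactionConflictException {
+//		// another transaction holds a lock on the item; back off and retry
+//	}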
+type TransactionConflictException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TransactionConflictException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TransactionConflictException) GoString() string {
+	return s.String()
+}
+
+func newErrorTransactionConflictException(v protocol.ResponseMetadata) error {
+	return &TransactionConflictException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *TransactionConflictException) Code() string {
+	return "TransactionConflictException"
+}
+
+// Message returns the exception's message.
+func (s *TransactionConflictException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TransactionConflictException) OrigErr() error {
+	return nil
+}
+
+func (s *TransactionConflictException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TransactionConflictException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TransactionConflictException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The transaction with the given request token is already in progress.
+//
+// # Recommended Settings
+//
+// This is a general recommendation for handling the TransactionInProgressException.
+// These settings help ensure that the client retries will trigger completion
+// of the ongoing TransactWriteItems request.
+//
+// - Set clientExecutionTimeout to a value that allows at least one retry
+// to be processed after 5 seconds have elapsed since the first attempt for
+// the TransactWriteItems operation.
+//
+// - Set socketTimeout to a value a little lower than the requestTimeout
+// setting.
+//
+// - requestTimeout should be set based on the time taken for the individual
+// retries of a single HTTP request for your use case, but setting it to
+// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
+// errors.
+//
+// - Use exponential backoff when retrying and tune backoff if needed.
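+//
+// A hedged Go-side sketch of these guidelines (the timeout and retry values
+// are assumptions chosen to mirror the Java settings discussed above, and
+// sess is assumed to be an existing *session.Session):
+//
+//	cfg := aws.NewConfig().
+//		WithMaxRetries(3). // the default retryer backs off exponentially between attempts
+//		WithHTTPClient(&http.Client{Timeout: time.Second}) // per-attempt requestTimeout analogue
+//	svc := dynamodb.New(sess, cfg)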
+//
+// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97),
+// example timeout settings based on the guidelines above are as follows:
+//
+// Example timeline:
+//
+// - 0-1000 first attempt
+//
+// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base
+// delay for 4xx errors)
+//
+// - 1500-2500 second attempt
+//
+// - 2500-3500 second sleep/delay (500 * 2, exponential backoff)
+//
+// - 3500-4500 third attempt
+//
+// - 4500-6500 third sleep/delay (500 * 2^2)
+//
+// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
+// have elapsed since the first attempt reached TC)
+type TransactionInProgressException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TransactionInProgressException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TransactionInProgressException) GoString() string {
+	return s.String()
+}
+
+func newErrorTransactionInProgressException(v protocol.ResponseMetadata) error {
+	return &TransactionInProgressException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *TransactionInProgressException) Code() string {
+	return "TransactionInProgressException"
+}
+
+// Message returns the exception's message.
+func (s *TransactionInProgressException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TransactionInProgressException) OrigErr() error {
+	return nil
+}
+
+func (s *TransactionInProgressException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TransactionInProgressException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *TransactionInProgressException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type UntagResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The DynamoDB resource that the tags will be removed from. This value is an
+	// Amazon Resource Name (ARN).
+	//
+	// ResourceArn is a required field
+	ResourceArn *string `min:"1" type:"string" required:"true"`
+
+	// A list of tag keys. Existing tags of the resource whose keys are members
+	// of this list will be removed from the DynamoDB resource.
+	//
+	// TagKeys is a required field
+	TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// Represents a request to perform an UpdateItem operation. +type Update struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid + // values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table for the UpdateItem request. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new value(s) for them. 
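+	//
+	// A hypothetical example (attribute names and placeholders are illustrative
+	// only):
+	//
+	//	UpdateExpression: aws.String("SET Price = :p ADD PlayCount :one")
+	//
+	// with :p and :one supplied through ExpressionAttributeValues.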
+ // + // UpdateExpression is a required field + UpdateExpression *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Update) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Update) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Update) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Update"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.UpdateExpression == nil { + invalidParams.Add(request.NewErrParamRequired("UpdateExpression")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Update) SetConditionExpression(v string) *Update { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Update) SetExpressionAttributeNames(v map[string]*string) *Update { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Update) SetExpressionAttributeValues(v map[string]*AttributeValue) *Update { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Update) SetKey(v map[string]*AttributeValue) *Update { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Update) SetReturnValuesOnConditionCheckFailure(v string) *Update { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Update) SetTableName(v string) *Update { + s.TableName = &v + return s +} + +// SetUpdateExpression sets the UpdateExpression field's value. +func (s *Update) SetUpdateExpression(v string) *Update { + s.UpdateExpression = &v + return s +} + +type UpdateContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Represents the settings used to enable point in time recovery. + // + // PointInTimeRecoverySpecification is a required field + PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"} + if s.PointInTimeRecoverySpecification == nil { + invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoverySpecification")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.PointInTimeRecoverySpecification != nil { + if err := s.PointInTimeRecoverySpecification.Validate(); err != nil { + invalidParams.AddNested("PointInTimeRecoverySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPointInTimeRecoverySpecification sets the PointInTimeRecoverySpecification field's value. +func (s *UpdateContinuousBackupsInput) SetPointInTimeRecoverySpecification(v *PointInTimeRecoverySpecification) *UpdateContinuousBackupsInput { + s.PointInTimeRecoverySpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateContinuousBackupsInput) SetTableName(v string) *UpdateContinuousBackupsInput { + s.TableName = &v + return s +} + +type UpdateContinuousBackupsOutput struct { + _ struct{} `type:"structure"` + + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContinuousBackupsOutput) GoString() string { + return s.String() +} + +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. +func (s *UpdateContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *UpdateContinuousBackupsOutput { + s.ContinuousBackupsDescription = v + return s +} + +type UpdateContributorInsightsInput struct { + _ struct{} `type:"structure"` + + // Represents the contributor insights action. + // + // ContributorInsightsAction is a required field + ContributorInsightsAction *string `type:"string" required:"true" enum:"ContributorInsightsAction"` + + // The global secondary index name, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContributorInsightsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContributorInsightsInput"} + if s.ContributorInsightsAction == nil { + invalidParams.Add(request.NewErrParamRequired("ContributorInsightsAction")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContributorInsightsAction sets the ContributorInsightsAction field's value. +func (s *UpdateContributorInsightsInput) SetContributorInsightsAction(v string) *UpdateContributorInsightsInput { + s.ContributorInsightsAction = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *UpdateContributorInsightsInput) SetIndexName(v string) *UpdateContributorInsightsInput { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateContributorInsightsInput) SetTableName(v string) *UpdateContributorInsightsInput { + s.TableName = &v + return s +} + +type UpdateContributorInsightsOutput struct { + _ struct{} `type:"structure"` + + // The status of contributor insights + ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` + + // The name of the global secondary index, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateContributorInsightsOutput) GoString() string { + return s.String() +} + +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. +func (s *UpdateContributorInsightsOutput) SetContributorInsightsStatus(v string) *UpdateContributorInsightsOutput { + s.ContributorInsightsStatus = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *UpdateContributorInsightsOutput) SetIndexName(v string) *UpdateContributorInsightsOutput { + s.IndexName = &v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *UpdateContributorInsightsOutput) SetTableName(v string) *UpdateContributorInsightsOutput { + s.TableName = &v + return s +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be updated. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + // + // ProvisionedThroughput is a required field + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedThroughput == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction { + s.ProvisionedThroughput = v + return s +} + +type UpdateGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The global table name. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // A list of Regions that should be added or removed from the global table. + // + // ReplicaUpdates is a required field + ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.ReplicaUpdates == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicaUpdates")) + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableInput) SetGlobalTableName(v string) *UpdateGlobalTableInput { + s.GlobalTableName = &v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. +func (s *UpdateGlobalTableInput) SetReplicaUpdates(v []*ReplicaUpdate) *UpdateGlobalTableInput { + s.ReplicaUpdates = v + return s +} + +type UpdateGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *UpdateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *UpdateGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +type UpdateGlobalTableSettingsInput struct { + _ struct{} `type:"structure"` + + // The billing mode of the global table. If GlobalTableBillingMode is not specified, + // the global table defaults to PROVISIONED capacity billing mode. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. 
PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand).
+	GlobalTableBillingMode *string `type:"string" enum:"BillingMode"`
+
+	// Represents the settings of a global secondary index for a global table that
+	// will be modified.
+	GlobalTableGlobalSecondaryIndexSettingsUpdate []*GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`
+
+	// The name of the global table.
+	//
+	// GlobalTableName is a required field
+	GlobalTableName *string `min:"3" type:"string" required:"true"`
+
+	// Auto scaling settings for managing provisioned write capacity for the global
+	// table.
+	GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
+
+	// The maximum number of writes consumed per second before DynamoDB returns
+	// a ThrottlingException.
+	GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
+
+	// Represents the settings for a global table in a Region that will be modified.
+	ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UpdateGlobalTableSettingsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UpdateGlobalTableSettingsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"} + if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1)) + } + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1)) + } + if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaSettingsUpdate", 1)) + } + if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil { + for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaSettingsUpdate != nil { + for i, v := range s.ReplicaSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableBillingMode sets the GlobalTableBillingMode field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableBillingMode(v string) *UpdateGlobalTableSettingsInput { + s.GlobalTableBillingMode = &v + return s +} + +// SetGlobalTableGlobalSecondaryIndexSettingsUpdate sets the GlobalTableGlobalSecondaryIndexSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableGlobalSecondaryIndexSettingsUpdate(v []*GlobalTableGlobalSecondaryIndexSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.GlobalTableGlobalSecondaryIndexSettingsUpdate = v + return s +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsInput { + s.GlobalTableName = &v + return s +} + +// SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate sets the GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetGlobalTableProvisionedWriteCapacityUnits sets the GlobalTableProvisionedWriteCapacityUnits field's value. 
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityUnits(v int64) *UpdateGlobalTableSettingsInput { + s.GlobalTableProvisionedWriteCapacityUnits = &v + return s +} + +// SetReplicaSettingsUpdate sets the ReplicaSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetReplicaSettingsUpdate(v []*ReplicaSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.ReplicaSettingsUpdate = v + return s +} + +type UpdateGlobalTableSettingsOutput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateGlobalTableSettingsOutput) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableSettingsOutput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsOutput { + s.GlobalTableName = &v + return s +} + +// SetReplicaSettings sets the ReplicaSettings field's value. +func (s *UpdateGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *UpdateGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use UpdateExpression instead. For more information, + // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html) + // in the Amazon DynamoDB Developer Guide. + AttributeUpdates map[string]*AttributeValueUpdate `type:"map"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Specifying Conditions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. 
For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see Specifying Item + // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. 
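+	//
+	// For example, a hypothetical composite key (partition key Artist, sort key
+	// SongTitle) could be expressed as:
+	//
+	//	Key: map[string]*dynamodb.AttributeValue{
+	//		"Artist":    {S: aws.String("No One You Know")},
+	//		"SongTitle": {S: aws.String("Call Me Today")},
+	//	}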
+	//
+	// Key is a required field
+	Key map[string]*AttributeValue `type:"map" required:"true"`
+
+	// Determines the level of detail about either provisioned or on-demand throughput
+	// consumption that is returned in the response:
+	//
+	//    * INDEXES - The response includes the aggregate ConsumedCapacity for the
+	//    operation, together with ConsumedCapacity for each table and secondary
+	//    index that was accessed. Note that some operations, such as GetItem and
+	//    BatchGetItem, do not access any indexes at all. In these cases, specifying
+	//    INDEXES will only return ConsumedCapacity information for table(s).
+	//
+	//    * TOTAL - The response includes only the aggregate ConsumedCapacity for
+	//    the operation.
+	//
+	//    * NONE - No ConsumedCapacity details are included in the response.
+	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+	// Determines whether item collection metrics are returned. If set to SIZE,
+	// statistics about item collections, if any, that were modified during the
+	// operation are returned in the response. If set to NONE (the default), no
+	// statistics are returned.
+	ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+	// Use ReturnValues if you want to get the item attributes as they appear before
+	// or after they are successfully updated. For UpdateItem, the valid values
+	// are:
+	//
+	//    * NONE - If ReturnValues is not specified, or if its value is NONE, then
+	//    nothing is returned. (This setting is the default for ReturnValues.)
+	//
+	//    * ALL_OLD - Returns all of the attributes of the item, as they appeared
+	//    before the UpdateItem operation.
+	//
+	//    * UPDATED_OLD - Returns only the updated attributes, as they appeared
+	//    before the UpdateItem operation.
+	//
+	//    * ALL_NEW - Returns all of the attributes of the item, as they appear
+	//    after the UpdateItem operation.
+	//
+	//    * UPDATED_NEW - Returns only the updated attributes, as they appear after
+	//    the UpdateItem operation.
+	//
+	// There is no additional cost associated with requesting a return value aside
+	// from the small network and processing overhead of receiving a larger response.
+	// No read capacity units are consumed.
+	//
+	// The values returned are strongly consistent.
+	ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+	// The name of the table containing the item to update.
+	//
+	// TableName is a required field
+	TableName *string `min:"3" type:"string" required:"true"`
+
+	// An expression that defines one or more attributes to be updated, the action
+	// to be performed on them, and new values for them.
+	//
+	// The following action values are available for UpdateExpression.
+	//
+	//    * SET - Adds one or more attributes and values to an item. If any of these
+	//    attributes already exist, they are replaced by the new values. You can
+	//    also use SET to add or subtract from an attribute that is of type Number.
+	//    For example: SET myNum = myNum + :val SET supports the following functions:
+	//    if_not_exists (path, operand) - if the item does not contain an attribute
+	//    at the specified path, then if_not_exists evaluates to operand; otherwise,
+	//    it evaluates to path. You can use this function to avoid overwriting an
+	//    attribute that may already be present in the item. list_append (operand,
+	//    operand) - evaluates to a list with a new element added to it. You can
+	//    append the new element to the start or the end of the list by reversing
+	//    the order of the operands. These function names are case-sensitive.
+	//
+	//    * REMOVE - Removes one or more attributes from an item.
+	//
+	//    * ADD - Adds the specified value to the item, if the attribute does not
+	//    already exist. If the attribute does exist, then the behavior of ADD depends
+	//    on the data type of the attribute: If the existing attribute is a number,
+	//    and if Value is also a number, then Value is mathematically added to the
+	//    existing attribute. If Value is a negative number, then it is subtracted
+	//    from the existing attribute. If you use ADD to increment or decrement
+	//    a number value for an item that doesn't exist before the update, DynamoDB
+	//    uses 0 as the initial value. Similarly, if you use ADD for an existing
+	//    item to increment or decrement an attribute value that doesn't exist before
+	//    the update, DynamoDB uses 0 as the initial value. For example, suppose
+	//    that the item you want to update doesn't have an attribute named itemcount,
+	//    but you decide to ADD the number 3 to this attribute anyway. DynamoDB
+	//    will create the itemcount attribute, set its initial value to 0, and finally
+	//    add 3 to it. The result will be a new itemcount attribute in the item,
+	//    with a value of 3. If the existing data type is a set and if Value is
+	//    also a set, then Value is added to the existing set. For example, if the
+	//    attribute value is the set [1,2], and the ADD action specified [3], then
+	//    the final attribute value is [1,2,3]. An error occurs if an ADD action
+	//    is specified for a set attribute and the attribute type specified does
+	//    not match the existing set type. Both sets must have the same primitive
+	//    data type. For example, if the existing data type is a set of strings,
+	//    the Value must also be a set of strings. The ADD action only supports
+	//    Number and set data types. In addition, ADD can only be used on top-level
+	//    attributes, not nested attributes.
+	//
+	//    * DELETE - Deletes an element from a set. If a set of values is specified,
+	//    then those values are subtracted from the old set. For example, if the
+	//    attribute value was the set [a,b,c] and the DELETE action specifies [a,c],
+	//    then the final attribute value is [b]. Specifying an empty set is an error.
+	//    The DELETE action only supports set data types. In addition, DELETE can
+	//    only be used on top-level attributes, not nested attributes.
+	//
+	// You can have many actions in a single expression, such as the following:
+	// SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
+	//
+	// For more information on update expressions, see Modifying Items and Attributes
+	// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
+	// in the Amazon DynamoDB Developer Guide.
+	UpdateExpression *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UpdateItemInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UpdateItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeUpdates sets the AttributeUpdates field's value. +func (s *UpdateItemInput) SetAttributeUpdates(v map[string]*AttributeValueUpdate) *UpdateItemInput { + s.AttributeUpdates = v + return s +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *UpdateItemInput) SetConditionExpression(v string) *UpdateItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *UpdateItemInput) SetConditionalOperator(v string) *UpdateItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *UpdateItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *UpdateItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *UpdateItemInput) SetExpressionAttributeNames(v map[string]*string) *UpdateItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *UpdateItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *UpdateItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *UpdateItemInput) SetKey(v map[string]*AttributeValue) *UpdateItemInput { + s.Key = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *UpdateItemInput) SetReturnConsumedCapacity(v string) *UpdateItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *UpdateItemInput) SetReturnItemCollectionMetrics(v string) *UpdateItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput { + s.TableName = &v + return s +} + +// SetUpdateExpression sets the UpdateExpression field's value. +func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput { + s.UpdateExpression = &v + return s +} + +// Represents the output of an UpdateItem operation. +type UpdateItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute values as they appear before or after the UpdateItem operation, + // as determined by the ReturnValues parameter. + // + // The Attributes map is only present if the update was successful and ReturnValues + // was specified as something other than NONE in the request. Each element represents + // one attribute. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the UpdateItem operation. 
The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the UpdateItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *UpdateItemOutput) SetAttributes(v map[string]*AttributeValue) *UpdateItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *UpdateItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *UpdateItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *UpdateItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a replica to be modified. +type UpdateReplicationGroupMemberAction struct { + _ struct{} `type:"structure"` + + // Replica-specific global secondary index settings. + GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` + + // The KMS key of the replica that should be used for KMS encryption. To specify + // a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. + // Note that you should only provide this parameter if the key is different + // from the default DynamoDB KMS key alias/aws/dynamodb. 
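+	//
+	// For example (hypothetical identifiers), any of the following forms works:
+	//
+	//	"1234abcd-12ab-34cd-56ef-1234567890ab"
+	//	"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+	//	"alias/my-key"
+	//	"arn:aws:kms:us-west-2:111122223333:alias/my-key"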
+ KMSMasterKeyId *string `type:"string"` + + // Replica-specific provisioned throughput. If not specified, uses the source + // table's provisioned throughput settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` + + // The Region where the replica exists. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Replica-specific table class. If not specified, uses the source table's table + // class. + TableClassOverride *string `type:"string" enum:"TableClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateReplicationGroupMemberAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateReplicationGroupMemberAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateReplicationGroupMemberAction"} + if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1)) + } + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughputOverride != nil { + if err := s.ProvisionedThroughputOverride.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *UpdateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *UpdateReplicationGroupMemberAction { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *UpdateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *UpdateReplicationGroupMemberAction { + s.KMSMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *UpdateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *UpdateReplicationGroupMemberAction { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *UpdateReplicationGroupMemberAction) SetRegionName(v string) *UpdateReplicationGroupMemberAction { + s.RegionName = &v + return s +} + +// SetTableClassOverride sets the TableClassOverride field's value. 
+func (s *UpdateReplicationGroupMemberAction) SetTableClassOverride(v string) *UpdateReplicationGroupMemberAction { + s.TableClassOverride = &v + return s +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, AttributeDefinitions + // must include the key element(s) of the new index. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. When switching from pay-per-request to provisioned capacity, initial + // provisioned capacity values must be set. The initial provisioned capacity + // values are estimated based on the consumed read and write capacity of your + // table and global secondary indexes over the past 30 minutes. + // + // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + // + // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + BillingMode *string `type:"string" enum:"BillingMode"` + + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool `type:"boolean"` + + // An array of one or more global secondary indexes for the table. For each + // index in the array, you can request one action: + // + // * Create - add a new global secondary index to the table. + // + // * Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // * Delete - remove a global secondary index from the table. + // + // You can create or delete only one global secondary index per UpdateTable + // operation. + // + // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // in the Amazon DynamoDB Developer Guide. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + + // The new provisioned throughput settings for the specified table or index. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // A list of replica update actions (create, delete, or update) for the table. + // + // This property only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) + // of global tables. + ReplicaUpdates []*ReplicationGroupUpdate `min:"1" type:"list"` + + // The new server-side encryption settings for the specified table. + SSESpecification *SSESpecification `type:"structure"` + + // Represents the DynamoDB Streams configuration for the table. + // + // You receive a ResourceInUseException if you try to enable a stream on a table + // that already has a stream, or if you try to disable a stream on a table that + // doesn't have a stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The table class of the table to be updated. Valid values are STANDARD and + // STANDARD_INFREQUENT_ACCESS. 
+ TableClass *string `type:"string" enum:"TableClass"` + + // The name of the table to be updated. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"} + if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexUpdates != nil { + for i, v := range s.GlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.StreamSpecification != nil { + if err := s.StreamSpecification.Validate(); err != nil { + invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *UpdateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *UpdateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput { + s.BillingMode = &v + return s +} + +// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. +func (s *UpdateTableInput) SetDeletionProtectionEnabled(v bool) *UpdateTableInput { + s.DeletionProtectionEnabled = &v + return s +} + +// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. 
+func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput { + s.GlobalSecondaryIndexUpdates = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. +func (s *UpdateTableInput) SetReplicaUpdates(v []*ReplicationGroupUpdate) *UpdateTableInput { + s.ReplicaUpdates = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *UpdateTableInput) SetSSESpecification(v *SSESpecification) *UpdateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *UpdateTableInput) SetStreamSpecification(v *StreamSpecification) *UpdateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableClass sets the TableClass field's value. +func (s *UpdateTableInput) SetTableClass(v string) *UpdateTableInput { + s.TableClass = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTableInput) SetTableName(v string) *UpdateTableInput { + s.TableName = &v + return s +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *UpdateTableOutput) SetTableDescription(v *TableDescription) *UpdateTableOutput { + s.TableDescription = v + return s +} + +type UpdateTableReplicaAutoScalingInput struct { + _ struct{} `type:"structure"` + + // Represents the auto scaling settings of the global secondary indexes of the + // replica to be updated. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexAutoScalingUpdate `min:"1" type:"list"` + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // Represents the auto scaling settings of replicas of the table that will be + // modified. + ReplicaUpdates []*ReplicaAutoScalingUpdate `min:"1" type:"list"` + + // The name of the global table to be updated. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateTableReplicaAutoScalingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableReplicaAutoScalingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTableReplicaAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTableReplicaAutoScalingInput"} + if s.GlobalSecondaryIndexUpdates != nil && len(s.GlobalSecondaryIndexUpdates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexUpdates", 1)) + } + if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.GlobalSecondaryIndexUpdates != nil { + for i, v := range s.GlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedWriteCapacityAutoScalingUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput { + s.GlobalSecondaryIndexUpdates = v + return s +} + +// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *UpdateTableReplicaAutoScalingInput { + s.ProvisionedWriteCapacityAutoScalingUpdate = v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetReplicaUpdates(v []*ReplicaAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput { + s.ReplicaUpdates = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTableReplicaAutoScalingInput) SetTableName(v string) *UpdateTableReplicaAutoScalingInput { + s.TableName = &v + return s +} + +type UpdateTableReplicaAutoScalingOutput struct { + _ struct{} `type:"structure"` + + // Returns information about the auto scaling settings of a table with replicas. + TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableReplicaAutoScalingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTableReplicaAutoScalingOutput) GoString() string { + return s.String() +} + +// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. +func (s *UpdateTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *UpdateTableReplicaAutoScalingOutput { + s.TableAutoScalingDescription = v + return s +} + +// Represents the input of an UpdateTimeToLive operation. +type UpdateTimeToLiveInput struct { + _ struct{} `type:"structure"` + + // The name of the table to be configured. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // Represents the settings used to enable or disable Time to Live for the specified + // table. + // + // TimeToLiveSpecification is a required field + TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTimeToLiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTimeToLiveInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TimeToLiveSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification")) + } + if s.TimeToLiveSpecification != nil { + if err := s.TimeToLiveSpecification.Validate(); err != nil { + invalidParams.AddNested("TimeToLiveSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTimeToLiveInput) SetTableName(v string) *UpdateTimeToLiveInput { + s.TableName = &v + return s +} + +// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. +func (s *UpdateTimeToLiveInput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveInput { + s.TimeToLiveSpecification = v + return s +} + +type UpdateTimeToLiveOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of an UpdateTimeToLive operation. + TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateTimeToLiveOutput) GoString() string { + return s.String() +} + +// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. +func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveOutput { + s.TimeToLiveSpecification = v + return s +} + +// Represents an operation to perform - either DeleteItem or PutItem. You can +// only request one of these operations, not both, in a single WriteRequest. +// If you do need to perform both of these operations, you need to provide two +// separate WriteRequest objects. +type WriteRequest struct { + _ struct{} `type:"structure"` + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest `type:"structure"` + + // A request to perform a PutItem operation. + PutRequest *PutRequest `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteRequest) GoString() string { + return s.String() +} + +// SetDeleteRequest sets the DeleteRequest field's value. +func (s *WriteRequest) SetDeleteRequest(v *DeleteRequest) *WriteRequest { + s.DeleteRequest = v + return s +} + +// SetPutRequest sets the PutRequest field's value. 
+func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest { + s.PutRequest = v + return s +} + +const ( + // AttributeActionAdd is a AttributeAction enum value + AttributeActionAdd = "ADD" + + // AttributeActionPut is a AttributeAction enum value + AttributeActionPut = "PUT" + + // AttributeActionDelete is a AttributeAction enum value + AttributeActionDelete = "DELETE" +) + +// AttributeAction_Values returns all elements of the AttributeAction enum +func AttributeAction_Values() []string { + return []string{ + AttributeActionAdd, + AttributeActionPut, + AttributeActionDelete, + } +} + +const ( + // BackupStatusCreating is a BackupStatus enum value + BackupStatusCreating = "CREATING" + + // BackupStatusDeleted is a BackupStatus enum value + BackupStatusDeleted = "DELETED" + + // BackupStatusAvailable is a BackupStatus enum value + BackupStatusAvailable = "AVAILABLE" +) + +// BackupStatus_Values returns all elements of the BackupStatus enum +func BackupStatus_Values() []string { + return []string{ + BackupStatusCreating, + BackupStatusDeleted, + BackupStatusAvailable, + } +} + +const ( + // BackupTypeUser is a BackupType enum value + BackupTypeUser = "USER" + + // BackupTypeSystem is a BackupType enum value + BackupTypeSystem = "SYSTEM" + + // BackupTypeAwsBackup is a BackupType enum value + BackupTypeAwsBackup = "AWS_BACKUP" +) + +// BackupType_Values returns all elements of the BackupType enum +func BackupType_Values() []string { + return []string{ + BackupTypeUser, + BackupTypeSystem, + BackupTypeAwsBackup, + } +} + +const ( + // BackupTypeFilterUser is a BackupTypeFilter enum value + BackupTypeFilterUser = "USER" + + // BackupTypeFilterSystem is a BackupTypeFilter enum value + BackupTypeFilterSystem = "SYSTEM" + + // BackupTypeFilterAwsBackup is a BackupTypeFilter enum value + BackupTypeFilterAwsBackup = "AWS_BACKUP" + + // BackupTypeFilterAll is a BackupTypeFilter enum value + BackupTypeFilterAll = "ALL" +) + +// BackupTypeFilter_Values returns all elements of the BackupTypeFilter enum +func BackupTypeFilter_Values() []string { + return []string{ + BackupTypeFilterUser, + BackupTypeFilterSystem, + BackupTypeFilterAwsBackup, + BackupTypeFilterAll, + } +} + +const ( + // BatchStatementErrorCodeEnumConditionalCheckFailed is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumConditionalCheckFailed = "ConditionalCheckFailed" + + // BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded = "ItemCollectionSizeLimitExceeded" + + // BatchStatementErrorCodeEnumRequestLimitExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumRequestLimitExceeded = "RequestLimitExceeded" + + // BatchStatementErrorCodeEnumValidationError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumValidationError = "ValidationError" + + // BatchStatementErrorCodeEnumProvisionedThroughputExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumProvisionedThroughputExceeded = "ProvisionedThroughputExceeded" + + // BatchStatementErrorCodeEnumTransactionConflict is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumTransactionConflict = "TransactionConflict" + + // BatchStatementErrorCodeEnumThrottlingError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumThrottlingError = "ThrottlingError" + + // BatchStatementErrorCodeEnumInternalServerError is a BatchStatementErrorCodeEnum 
enum value + BatchStatementErrorCodeEnumInternalServerError = "InternalServerError" + + // BatchStatementErrorCodeEnumResourceNotFound is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumResourceNotFound = "ResourceNotFound" + + // BatchStatementErrorCodeEnumAccessDenied is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumAccessDenied = "AccessDenied" + + // BatchStatementErrorCodeEnumDuplicateItem is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumDuplicateItem = "DuplicateItem" +) + +// BatchStatementErrorCodeEnum_Values returns all elements of the BatchStatementErrorCodeEnum enum +func BatchStatementErrorCodeEnum_Values() []string { + return []string{ + BatchStatementErrorCodeEnumConditionalCheckFailed, + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded, + BatchStatementErrorCodeEnumRequestLimitExceeded, + BatchStatementErrorCodeEnumValidationError, + BatchStatementErrorCodeEnumProvisionedThroughputExceeded, + BatchStatementErrorCodeEnumTransactionConflict, + BatchStatementErrorCodeEnumThrottlingError, + BatchStatementErrorCodeEnumInternalServerError, + BatchStatementErrorCodeEnumResourceNotFound, + BatchStatementErrorCodeEnumAccessDenied, + BatchStatementErrorCodeEnumDuplicateItem, + } +} + +const ( + // BillingModeProvisioned is a BillingMode enum value + BillingModeProvisioned = "PROVISIONED" + + // BillingModePayPerRequest is a BillingMode enum value + BillingModePayPerRequest = "PAY_PER_REQUEST" +) + +// BillingMode_Values returns all elements of the BillingMode enum +func BillingMode_Values() []string { + return []string{ + BillingModeProvisioned, + BillingModePayPerRequest, + } +} + +const ( + // ComparisonOperatorEq is a ComparisonOperator enum value + ComparisonOperatorEq = "EQ" + + // ComparisonOperatorNe is a ComparisonOperator enum value + ComparisonOperatorNe = "NE" + + // ComparisonOperatorIn is a ComparisonOperator enum value + ComparisonOperatorIn = "IN" + + // ComparisonOperatorLe is a ComparisonOperator enum value + ComparisonOperatorLe = "LE" + + // ComparisonOperatorLt is a ComparisonOperator enum value + ComparisonOperatorLt = "LT" + + // ComparisonOperatorGe is a ComparisonOperator enum value + ComparisonOperatorGe = "GE" + + // ComparisonOperatorGt is a ComparisonOperator enum value + ComparisonOperatorGt = "GT" + + // ComparisonOperatorBetween is a ComparisonOperator enum value + ComparisonOperatorBetween = "BETWEEN" + + // ComparisonOperatorNotNull is a ComparisonOperator enum value + ComparisonOperatorNotNull = "NOT_NULL" + + // ComparisonOperatorNull is a ComparisonOperator enum value + ComparisonOperatorNull = "NULL" + + // ComparisonOperatorContains is a ComparisonOperator enum value + ComparisonOperatorContains = "CONTAINS" + + // ComparisonOperatorNotContains is a ComparisonOperator enum value + ComparisonOperatorNotContains = "NOT_CONTAINS" + + // ComparisonOperatorBeginsWith is a ComparisonOperator enum value + ComparisonOperatorBeginsWith = "BEGINS_WITH" +) + +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorIn, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + ComparisonOperatorBetween, + ComparisonOperatorNotNull, + ComparisonOperatorNull, + ComparisonOperatorContains, + ComparisonOperatorNotContains, + ComparisonOperatorBeginsWith, + } +} + +const ( + // 
ConditionalOperatorAnd is a ConditionalOperator enum value + ConditionalOperatorAnd = "AND" + + // ConditionalOperatorOr is a ConditionalOperator enum value + ConditionalOperatorOr = "OR" +) + +// ConditionalOperator_Values returns all elements of the ConditionalOperator enum +func ConditionalOperator_Values() []string { + return []string{ + ConditionalOperatorAnd, + ConditionalOperatorOr, + } +} + +const ( + // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value + ContinuousBackupsStatusEnabled = "ENABLED" + + // ContinuousBackupsStatusDisabled is a ContinuousBackupsStatus enum value + ContinuousBackupsStatusDisabled = "DISABLED" +) + +// ContinuousBackupsStatus_Values returns all elements of the ContinuousBackupsStatus enum +func ContinuousBackupsStatus_Values() []string { + return []string{ + ContinuousBackupsStatusEnabled, + ContinuousBackupsStatusDisabled, + } +} + +const ( + // ContributorInsightsActionEnable is a ContributorInsightsAction enum value + ContributorInsightsActionEnable = "ENABLE" + + // ContributorInsightsActionDisable is a ContributorInsightsAction enum value + ContributorInsightsActionDisable = "DISABLE" +) + +// ContributorInsightsAction_Values returns all elements of the ContributorInsightsAction enum +func ContributorInsightsAction_Values() []string { + return []string{ + ContributorInsightsActionEnable, + ContributorInsightsActionDisable, + } +} + +const ( + // ContributorInsightsStatusEnabling is a ContributorInsightsStatus enum value + ContributorInsightsStatusEnabling = "ENABLING" + + // ContributorInsightsStatusEnabled is a ContributorInsightsStatus enum value + ContributorInsightsStatusEnabled = "ENABLED" + + // ContributorInsightsStatusDisabling is a ContributorInsightsStatus enum value + ContributorInsightsStatusDisabling = "DISABLING" + + // ContributorInsightsStatusDisabled is a ContributorInsightsStatus enum value + ContributorInsightsStatusDisabled = "DISABLED" + + // ContributorInsightsStatusFailed is a ContributorInsightsStatus enum value + ContributorInsightsStatusFailed = "FAILED" +) + +// ContributorInsightsStatus_Values returns all elements of the ContributorInsightsStatus enum +func ContributorInsightsStatus_Values() []string { + return []string{ + ContributorInsightsStatusEnabling, + ContributorInsightsStatusEnabled, + ContributorInsightsStatusDisabling, + ContributorInsightsStatusDisabled, + ContributorInsightsStatusFailed, + } +} + +const ( + // DestinationStatusEnabling is a DestinationStatus enum value + DestinationStatusEnabling = "ENABLING" + + // DestinationStatusActive is a DestinationStatus enum value + DestinationStatusActive = "ACTIVE" + + // DestinationStatusDisabling is a DestinationStatus enum value + DestinationStatusDisabling = "DISABLING" + + // DestinationStatusDisabled is a DestinationStatus enum value + DestinationStatusDisabled = "DISABLED" + + // DestinationStatusEnableFailed is a DestinationStatus enum value + DestinationStatusEnableFailed = "ENABLE_FAILED" +) + +// DestinationStatus_Values returns all elements of the DestinationStatus enum +func DestinationStatus_Values() []string { + return []string{ + DestinationStatusEnabling, + DestinationStatusActive, + DestinationStatusDisabling, + DestinationStatusDisabled, + DestinationStatusEnableFailed, + } +} + +const ( + // ExportFormatDynamodbJson is a ExportFormat enum value + ExportFormatDynamodbJson = "DYNAMODB_JSON" + + // ExportFormatIon is a ExportFormat enum value + ExportFormatIon = "ION" +) + +// ExportFormat_Values returns all elements of the 
ExportFormat enum +func ExportFormat_Values() []string { + return []string{ + ExportFormatDynamodbJson, + ExportFormatIon, + } +} + +const ( + // ExportStatusInProgress is a ExportStatus enum value + ExportStatusInProgress = "IN_PROGRESS" + + // ExportStatusCompleted is a ExportStatus enum value + ExportStatusCompleted = "COMPLETED" + + // ExportStatusFailed is a ExportStatus enum value + ExportStatusFailed = "FAILED" +) + +// ExportStatus_Values returns all elements of the ExportStatus enum +func ExportStatus_Values() []string { + return []string{ + ExportStatusInProgress, + ExportStatusCompleted, + ExportStatusFailed, + } +} + +const ( + // GlobalTableStatusCreating is a GlobalTableStatus enum value + GlobalTableStatusCreating = "CREATING" + + // GlobalTableStatusActive is a GlobalTableStatus enum value + GlobalTableStatusActive = "ACTIVE" + + // GlobalTableStatusDeleting is a GlobalTableStatus enum value + GlobalTableStatusDeleting = "DELETING" + + // GlobalTableStatusUpdating is a GlobalTableStatus enum value + GlobalTableStatusUpdating = "UPDATING" +) + +// GlobalTableStatus_Values returns all elements of the GlobalTableStatus enum +func GlobalTableStatus_Values() []string { + return []string{ + GlobalTableStatusCreating, + GlobalTableStatusActive, + GlobalTableStatusDeleting, + GlobalTableStatusUpdating, + } +} + +const ( + // ImportStatusInProgress is a ImportStatus enum value + ImportStatusInProgress = "IN_PROGRESS" + + // ImportStatusCompleted is a ImportStatus enum value + ImportStatusCompleted = "COMPLETED" + + // ImportStatusCancelling is a ImportStatus enum value + ImportStatusCancelling = "CANCELLING" + + // ImportStatusCancelled is a ImportStatus enum value + ImportStatusCancelled = "CANCELLED" + + // ImportStatusFailed is a ImportStatus enum value + ImportStatusFailed = "FAILED" +) + +// ImportStatus_Values returns all elements of the ImportStatus enum +func ImportStatus_Values() []string { + return []string{ + ImportStatusInProgress, + ImportStatusCompleted, + ImportStatusCancelling, + ImportStatusCancelled, + ImportStatusFailed, + } +} + +const ( + // IndexStatusCreating is a IndexStatus enum value + IndexStatusCreating = "CREATING" + + // IndexStatusUpdating is a IndexStatus enum value + IndexStatusUpdating = "UPDATING" + + // IndexStatusDeleting is a IndexStatus enum value + IndexStatusDeleting = "DELETING" + + // IndexStatusActive is a IndexStatus enum value + IndexStatusActive = "ACTIVE" +) + +// IndexStatus_Values returns all elements of the IndexStatus enum +func IndexStatus_Values() []string { + return []string{ + IndexStatusCreating, + IndexStatusUpdating, + IndexStatusDeleting, + IndexStatusActive, + } +} + +const ( + // InputCompressionTypeGzip is a InputCompressionType enum value + InputCompressionTypeGzip = "GZIP" + + // InputCompressionTypeZstd is a InputCompressionType enum value + InputCompressionTypeZstd = "ZSTD" + + // InputCompressionTypeNone is a InputCompressionType enum value + InputCompressionTypeNone = "NONE" +) + +// InputCompressionType_Values returns all elements of the InputCompressionType enum +func InputCompressionType_Values() []string { + return []string{ + InputCompressionTypeGzip, + InputCompressionTypeZstd, + InputCompressionTypeNone, + } +} + +const ( + // InputFormatDynamodbJson is a InputFormat enum value + InputFormatDynamodbJson = "DYNAMODB_JSON" + + // InputFormatIon is a InputFormat enum value + InputFormatIon = "ION" + + // InputFormatCsv is a InputFormat enum value + InputFormatCsv = "CSV" +) + +// InputFormat_Values returns all 
elements of the InputFormat enum +func InputFormat_Values() []string { + return []string{ + InputFormatDynamodbJson, + InputFormatIon, + InputFormatCsv, + } +} + +const ( + // KeyTypeHash is a KeyType enum value + KeyTypeHash = "HASH" + + // KeyTypeRange is a KeyType enum value + KeyTypeRange = "RANGE" +) + +// KeyType_Values returns all elements of the KeyType enum +func KeyType_Values() []string { + return []string{ + KeyTypeHash, + KeyTypeRange, + } +} + +const ( + // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value + PointInTimeRecoveryStatusEnabled = "ENABLED" + + // PointInTimeRecoveryStatusDisabled is a PointInTimeRecoveryStatus enum value + PointInTimeRecoveryStatusDisabled = "DISABLED" +) + +// PointInTimeRecoveryStatus_Values returns all elements of the PointInTimeRecoveryStatus enum +func PointInTimeRecoveryStatus_Values() []string { + return []string{ + PointInTimeRecoveryStatusEnabled, + PointInTimeRecoveryStatusDisabled, + } +} + +const ( + // ProjectionTypeAll is a ProjectionType enum value + ProjectionTypeAll = "ALL" + + // ProjectionTypeKeysOnly is a ProjectionType enum value + ProjectionTypeKeysOnly = "KEYS_ONLY" + + // ProjectionTypeInclude is a ProjectionType enum value + ProjectionTypeInclude = "INCLUDE" +) + +// ProjectionType_Values returns all elements of the ProjectionType enum +func ProjectionType_Values() []string { + return []string{ + ProjectionTypeAll, + ProjectionTypeKeysOnly, + ProjectionTypeInclude, + } +} + +const ( + // ReplicaStatusCreating is a ReplicaStatus enum value + ReplicaStatusCreating = "CREATING" + + // ReplicaStatusCreationFailed is a ReplicaStatus enum value + ReplicaStatusCreationFailed = "CREATION_FAILED" + + // ReplicaStatusUpdating is a ReplicaStatus enum value + ReplicaStatusUpdating = "UPDATING" + + // ReplicaStatusDeleting is a ReplicaStatus enum value + ReplicaStatusDeleting = "DELETING" + + // ReplicaStatusActive is a ReplicaStatus enum value + ReplicaStatusActive = "ACTIVE" + + // ReplicaStatusRegionDisabled is a ReplicaStatus enum value + ReplicaStatusRegionDisabled = "REGION_DISABLED" + + // ReplicaStatusInaccessibleEncryptionCredentials is a ReplicaStatus enum value + ReplicaStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" +) + +// ReplicaStatus_Values returns all elements of the ReplicaStatus enum +func ReplicaStatus_Values() []string { + return []string{ + ReplicaStatusCreating, + ReplicaStatusCreationFailed, + ReplicaStatusUpdating, + ReplicaStatusDeleting, + ReplicaStatusActive, + ReplicaStatusRegionDisabled, + ReplicaStatusInaccessibleEncryptionCredentials, + } +} + +// Determines the level of detail about either provisioned or on-demand throughput +// consumption that is returned in the response: +// +// - INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary +// index that was accessed. Note that some operations, such as GetItem and +// BatchGetItem, do not access any indexes at all. In these cases, specifying +// INDEXES will only return ConsumedCapacity information for table(s). +// +// - TOTAL - The response includes only the aggregate ConsumedCapacity for +// the operation. +// +// - NONE - No ConsumedCapacity details are included in the response. 
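+//
+// A minimal usage sketch (illustrative; not part of the generated API): a
+// Query request can ask for per-index capacity details by setting this value
+// on the input, assuming an existing client svc and a hypothetical table:
+//
+//	out, err := svc.Query(&dynamodb.QueryInput{
+//	    TableName:              aws.String("Music"), // hypothetical table name
+//	    KeyConditionExpression: aws.String("Artist = :a"),
+//	    ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+//	        ":a": {S: aws.String("Acme Band")},
+//	    },
+//	    ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityIndexes),
+//	})
+//	if err == nil && out.ConsumedCapacity != nil {
+//	    // out.ConsumedCapacity breaks down table and index capacity units.
+//	}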
+const ( + // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityIndexes = "INDEXES" + + // ReturnConsumedCapacityTotal is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityTotal = "TOTAL" + + // ReturnConsumedCapacityNone is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityNone = "NONE" +) + +// ReturnConsumedCapacity_Values returns all elements of the ReturnConsumedCapacity enum +func ReturnConsumedCapacity_Values() []string { + return []string{ + ReturnConsumedCapacityIndexes, + ReturnConsumedCapacityTotal, + ReturnConsumedCapacityNone, + } +} + +const ( + // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value + ReturnItemCollectionMetricsSize = "SIZE" + + // ReturnItemCollectionMetricsNone is a ReturnItemCollectionMetrics enum value + ReturnItemCollectionMetricsNone = "NONE" +) + +// ReturnItemCollectionMetrics_Values returns all elements of the ReturnItemCollectionMetrics enum +func ReturnItemCollectionMetrics_Values() []string { + return []string{ + ReturnItemCollectionMetricsSize, + ReturnItemCollectionMetricsNone, + } +} + +const ( + // ReturnValueNone is a ReturnValue enum value + ReturnValueNone = "NONE" + + // ReturnValueAllOld is a ReturnValue enum value + ReturnValueAllOld = "ALL_OLD" + + // ReturnValueUpdatedOld is a ReturnValue enum value + ReturnValueUpdatedOld = "UPDATED_OLD" + + // ReturnValueAllNew is a ReturnValue enum value + ReturnValueAllNew = "ALL_NEW" + + // ReturnValueUpdatedNew is a ReturnValue enum value + ReturnValueUpdatedNew = "UPDATED_NEW" +) + +// ReturnValue_Values returns all elements of the ReturnValue enum +func ReturnValue_Values() []string { + return []string{ + ReturnValueNone, + ReturnValueAllOld, + ReturnValueUpdatedOld, + ReturnValueAllNew, + ReturnValueUpdatedNew, + } +} + +const ( + // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value + ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD" + + // ReturnValuesOnConditionCheckFailureNone is a ReturnValuesOnConditionCheckFailure enum value + ReturnValuesOnConditionCheckFailureNone = "NONE" +) + +// ReturnValuesOnConditionCheckFailure_Values returns all elements of the ReturnValuesOnConditionCheckFailure enum +func ReturnValuesOnConditionCheckFailure_Values() []string { + return []string{ + ReturnValuesOnConditionCheckFailureAllOld, + ReturnValuesOnConditionCheckFailureNone, + } +} + +const ( + // S3SseAlgorithmAes256 is a S3SseAlgorithm enum value + S3SseAlgorithmAes256 = "AES256" + + // S3SseAlgorithmKms is a S3SseAlgorithm enum value + S3SseAlgorithmKms = "KMS" +) + +// S3SseAlgorithm_Values returns all elements of the S3SseAlgorithm enum +func S3SseAlgorithm_Values() []string { + return []string{ + S3SseAlgorithmAes256, + S3SseAlgorithmKms, + } +} + +const ( + // SSEStatusEnabling is a SSEStatus enum value + SSEStatusEnabling = "ENABLING" + + // SSEStatusEnabled is a SSEStatus enum value + SSEStatusEnabled = "ENABLED" + + // SSEStatusDisabling is a SSEStatus enum value + SSEStatusDisabling = "DISABLING" + + // SSEStatusDisabled is a SSEStatus enum value + SSEStatusDisabled = "DISABLED" + + // SSEStatusUpdating is a SSEStatus enum value + SSEStatusUpdating = "UPDATING" +) + +// SSEStatus_Values returns all elements of the SSEStatus enum +func SSEStatus_Values() []string { + return []string{ + SSEStatusEnabling, + SSEStatusEnabled, + SSEStatusDisabling, + SSEStatusDisabled, + SSEStatusUpdating, + } +} + +const ( + // SSETypeAes256 is a SSEType enum value + 
SSETypeAes256 = "AES256" + + // SSETypeKms is a SSEType enum value + SSETypeKms = "KMS" +) + +// SSEType_Values returns all elements of the SSEType enum +func SSEType_Values() []string { + return []string{ + SSETypeAes256, + SSETypeKms, + } +} + +const ( + // ScalarAttributeTypeS is a ScalarAttributeType enum value + ScalarAttributeTypeS = "S" + + // ScalarAttributeTypeN is a ScalarAttributeType enum value + ScalarAttributeTypeN = "N" + + // ScalarAttributeTypeB is a ScalarAttributeType enum value + ScalarAttributeTypeB = "B" +) + +// ScalarAttributeType_Values returns all elements of the ScalarAttributeType enum +func ScalarAttributeType_Values() []string { + return []string{ + ScalarAttributeTypeS, + ScalarAttributeTypeN, + ScalarAttributeTypeB, + } +} + +const ( + // SelectAllAttributes is a Select enum value + SelectAllAttributes = "ALL_ATTRIBUTES" + + // SelectAllProjectedAttributes is a Select enum value + SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" + + // SelectSpecificAttributes is a Select enum value + SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" + + // SelectCount is a Select enum value + SelectCount = "COUNT" +) + +// Select_Values returns all elements of the Select enum +func Select_Values() []string { + return []string{ + SelectAllAttributes, + SelectAllProjectedAttributes, + SelectSpecificAttributes, + SelectCount, + } +} + +const ( + // StreamViewTypeNewImage is a StreamViewType enum value + StreamViewTypeNewImage = "NEW_IMAGE" + + // StreamViewTypeOldImage is a StreamViewType enum value + StreamViewTypeOldImage = "OLD_IMAGE" + + // StreamViewTypeNewAndOldImages is a StreamViewType enum value + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + + // StreamViewTypeKeysOnly is a StreamViewType enum value + StreamViewTypeKeysOnly = "KEYS_ONLY" +) + +// StreamViewType_Values returns all elements of the StreamViewType enum +func StreamViewType_Values() []string { + return []string{ + StreamViewTypeNewImage, + StreamViewTypeOldImage, + StreamViewTypeNewAndOldImages, + StreamViewTypeKeysOnly, + } +} + +const ( + // TableClassStandard is a TableClass enum value + TableClassStandard = "STANDARD" + + // TableClassStandardInfrequentAccess is a TableClass enum value + TableClassStandardInfrequentAccess = "STANDARD_INFREQUENT_ACCESS" +) + +// TableClass_Values returns all elements of the TableClass enum +func TableClass_Values() []string { + return []string{ + TableClassStandard, + TableClassStandardInfrequentAccess, + } +} + +const ( + // TableStatusCreating is a TableStatus enum value + TableStatusCreating = "CREATING" + + // TableStatusUpdating is a TableStatus enum value + TableStatusUpdating = "UPDATING" + + // TableStatusDeleting is a TableStatus enum value + TableStatusDeleting = "DELETING" + + // TableStatusActive is a TableStatus enum value + TableStatusActive = "ACTIVE" + + // TableStatusInaccessibleEncryptionCredentials is a TableStatus enum value + TableStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + + // TableStatusArchiving is a TableStatus enum value + TableStatusArchiving = "ARCHIVING" + + // TableStatusArchived is a TableStatus enum value + TableStatusArchived = "ARCHIVED" +) + +// TableStatus_Values returns all elements of the TableStatus enum +func TableStatus_Values() []string { + return []string{ + TableStatusCreating, + TableStatusUpdating, + TableStatusDeleting, + TableStatusActive, + TableStatusInaccessibleEncryptionCredentials, + TableStatusArchiving, + TableStatusArchived, + } +} + +const ( + // 
TimeToLiveStatusEnabling is a TimeToLiveStatus enum value
+	TimeToLiveStatusEnabling = "ENABLING"
+
+	// TimeToLiveStatusDisabling is a TimeToLiveStatus enum value
+	TimeToLiveStatusDisabling = "DISABLING"
+
+	// TimeToLiveStatusEnabled is a TimeToLiveStatus enum value
+	TimeToLiveStatusEnabled = "ENABLED"
+
+	// TimeToLiveStatusDisabled is a TimeToLiveStatus enum value
+	TimeToLiveStatusDisabled = "DISABLED"
+)
+
+// TimeToLiveStatus_Values returns all elements of the TimeToLiveStatus enum
+func TimeToLiveStatus_Values() []string {
+	return []string{
+		TimeToLiveStatusEnabling,
+		TimeToLiveStatusDisabling,
+		TimeToLiveStatusEnabled,
+		TimeToLiveStatusDisabled,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
new file mode 100644
index 000000000..c019e63df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
@@ -0,0 +1,98 @@
+package dynamodb
+
+import (
+	"bytes"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+	initClient = func(c *client.Client) {
+		if c.Config.Retryer == nil {
+			// Only override the retryer with a custom one if the config
+			// does not already contain a retryer
+			setCustomRetryer(c)
+		}
+
+		c.Handlers.Build.PushBack(disableCompression)
+		c.Handlers.Unmarshal.PushFront(validateCRC32)
+	}
+}
+
+// setCustomRetryer installs the DynamoDB-specific default retryer: up to 10
+// retries with a 50ms minimum retry delay.
+func setCustomRetryer(c *client.Client) {
+	maxRetries := aws.IntValue(c.Config.MaxRetries)
+	if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+		maxRetries = 10
+	}
+
+	c.Retryer = client.DefaultRetryer{
+		NumMaxRetries: maxRetries,
+		MinRetryDelay: 50 * time.Millisecond,
+	}
+}
+
+// drainBody reads and closes b, returning its contents in a buffer
+// pre-sized to the expected content length.
+func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) {
+	if length < 0 {
+		length = 0
+	}
+	buf := bytes.NewBuffer(make([]byte, 0, length))
+
+	if _, err = buf.ReadFrom(b); err != nil {
+		return nil, err
+	}
+	if err = b.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// disableCompression asks for an uncompressed (identity) response body so
+// the CRC32 checksum can be validated against the raw payload.
+func disableCompression(r *request.Request) {
+	r.HTTPRequest.Header.Set("Accept-Encoding", "identity")
+}
+
+// validateCRC32 compares the response body's CRC32 checksum against the
+// X-Amz-Crc32 header and marks a mismatch as a retryable error.
+func validateCRC32(r *request.Request) {
+	if r.Error != nil {
+		return // already have an error, no need to verify CRC
+	}
+
+	// Checksum validation is off, skip
+	if aws.BoolValue(r.Config.DisableComputeChecksums) {
+		return
+	}
+
+	// Try to get CRC from response
+	header := r.HTTPResponse.Header.Get("X-Amz-Crc32")
+	if header == "" {
+		return // No header, skip
+	}
+
+	expected, err := strconv.ParseUint(header, 10, 32)
+	if err != nil {
+		return // Could not determine CRC value, skip
+	}
+
+	buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength)
+	if err != nil { // failed to read the response body, skip
+		return
+	}
+
+	// Reset body for subsequent reads
+	r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
+
+	// Compute the CRC checksum
+	crc := crc32.ChecksumIEEE(buf.Bytes())
+
+	if crc != uint32(expected) {
+		// CRC does not match, set a retryable error
+		r.Retryable = aws.Bool(true)
+		r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
new file mode 100644
index 000000000..ab12b274f
--- /dev/null
+++ 
b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package dynamodb provides the client and types for making API
+// requests to Amazon DynamoDB.
+//
+// Amazon DynamoDB is a fully managed NoSQL database service that provides fast
+// and predictable performance with seamless scalability. DynamoDB lets you
+// offload the administrative burdens of operating and scaling a distributed
+// database, so that you don't have to worry about hardware provisioning, setup
+// and configuration, replication, software patching, or cluster scaling.
+//
+// With DynamoDB, you can create database tables that can store and retrieve
+// any amount of data, and serve any level of request traffic. You can scale
+// up or scale down your tables' throughput capacity without downtime or performance
+// degradation, and use the Amazon Web Services Management Console to monitor
+// resource utilization and performance metrics.
+//
+// DynamoDB automatically spreads the data and traffic for your tables over
+// a sufficient number of servers to handle your throughput and storage requirements,
+// while maintaining consistent and fast performance. All of your data is stored
+// on solid state disks (SSDs) and automatically replicated across multiple
+// Availability Zones in an Amazon Web Services Region, providing built-in high
+// availability and data durability.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service.
+//
+// See the dynamodb package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/
+//
+// # Using the Client
+//
+// To contact Amazon DynamoDB with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon DynamoDB client DynamoDB for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
new file mode 100644
index 000000000..0cca7e4b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
@@ -0,0 +1,27 @@
+/*
+AttributeValue Marshaling and Unmarshaling Helpers
+
+Utility helpers to marshal and unmarshal AttributeValue to and
+from Go types can be found in the dynamodbattribute subpackage. This package
+provides specialized functions for the common ways of working with
+AttributeValues, such as map[string]*AttributeValue, []*AttributeValue, and
+*AttributeValue directly. This is helpful for marshaling Go types for API
+operations such as PutItem, and for unmarshaling Query and Scan API responses.
+
+See the dynamodbattribute package documentation for more information.
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/
+
+# Expression Builders
+
+The expression package provides utility types and functions to build DynamoDB
+expressions for type-safe construction of API ExpressionAttributeNames and
+ExpressionAttributeValues.
+
+The package represents the various DynamoDB Expressions as structs named
+accordingly. For example, ConditionBuilder represents a DynamoDB Condition
+Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on.
+
+See the expression package documentation for more information.
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/
+*/
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
new file mode 100644
index 000000000..0c97f94ed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
@@ -0,0 +1,303 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package dynamodbiface provides an interface to enable mocking the Amazon DynamoDB service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package dynamodbiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// DynamoDBAPI provides an interface to enable mocking the
+// dynamodb.DynamoDB service client's API operations,
+// paginators, and waiters. This makes it easier to unit test code that
+// calls out to the SDK's service client.
+//
+// The best way to use this interface is to stub out the SDK's service
+// client's calls for unit testing your code with the SDK, without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//	// myFunc uses an SDK service client to make a request to
+//	// Amazon DynamoDB.
+//	func myFunc(svc dynamodbiface.DynamoDBAPI) bool {
+//	    // Make svc.BatchExecuteStatement request
+//	}
+//
+//	func main() {
+//	    sess := session.New()
+//	    svc := dynamodb.New(sess)
+//
+//	    myFunc(svc)
+//	}
+//
+// In your _test.go file:
+//
+//	// Define a mock struct to be used in your unit tests of myFunc.
+//	type mockDynamoDBClient struct {
+//	    dynamodbiface.DynamoDBAPI
+//	}
+//	func (m *mockDynamoDBClient) BatchExecuteStatement(input *dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) {
+//	    // mock response/functionality
+//	}
+//
+//	func TestMyFunc(t *testing.T) {
+//	    // Setup Test
+//	    mockSvc := &mockDynamoDBClient{}
+//
+//	    myFunc(mockSvc)
+//
+//	    // Verify myFunc's functionality
+//	}
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to
+// use tooling to generate mocks that satisfy the interfaces.
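+//
+// As a small compile-time sketch (illustrative, not generated code), you can
+// assert that the concrete client satisfies this interface:
+//
+//	var _ dynamodbiface.DynamoDBAPI = (*dynamodb.DynamoDB)(nil)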
+type DynamoDBAPI interface { + BatchExecuteStatement(*dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) + BatchExecuteStatementWithContext(aws.Context, *dynamodb.BatchExecuteStatementInput, ...request.Option) (*dynamodb.BatchExecuteStatementOutput, error) + BatchExecuteStatementRequest(*dynamodb.BatchExecuteStatementInput) (*request.Request, *dynamodb.BatchExecuteStatementOutput) + + BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) + BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error) + BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) + + BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error + BatchGetItemPagesWithContext(aws.Context, *dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool, ...request.Option) error + + BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) + BatchWriteItemWithContext(aws.Context, *dynamodb.BatchWriteItemInput, ...request.Option) (*dynamodb.BatchWriteItemOutput, error) + BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput) + + CreateBackup(*dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error) + CreateBackupWithContext(aws.Context, *dynamodb.CreateBackupInput, ...request.Option) (*dynamodb.CreateBackupOutput, error) + CreateBackupRequest(*dynamodb.CreateBackupInput) (*request.Request, *dynamodb.CreateBackupOutput) + + CreateGlobalTable(*dynamodb.CreateGlobalTableInput) (*dynamodb.CreateGlobalTableOutput, error) + CreateGlobalTableWithContext(aws.Context, *dynamodb.CreateGlobalTableInput, ...request.Option) (*dynamodb.CreateGlobalTableOutput, error) + CreateGlobalTableRequest(*dynamodb.CreateGlobalTableInput) (*request.Request, *dynamodb.CreateGlobalTableOutput) + + CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) + CreateTableWithContext(aws.Context, *dynamodb.CreateTableInput, ...request.Option) (*dynamodb.CreateTableOutput, error) + CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput) + + DeleteBackup(*dynamodb.DeleteBackupInput) (*dynamodb.DeleteBackupOutput, error) + DeleteBackupWithContext(aws.Context, *dynamodb.DeleteBackupInput, ...request.Option) (*dynamodb.DeleteBackupOutput, error) + DeleteBackupRequest(*dynamodb.DeleteBackupInput) (*request.Request, *dynamodb.DeleteBackupOutput) + + DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) + DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error) + DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) + + DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) + DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error) + DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) + + DescribeBackup(*dynamodb.DescribeBackupInput) (*dynamodb.DescribeBackupOutput, error) + DescribeBackupWithContext(aws.Context, *dynamodb.DescribeBackupInput, ...request.Option) (*dynamodb.DescribeBackupOutput, error) + DescribeBackupRequest(*dynamodb.DescribeBackupInput) (*request.Request, *dynamodb.DescribeBackupOutput) + + 
DescribeContinuousBackups(*dynamodb.DescribeContinuousBackupsInput) (*dynamodb.DescribeContinuousBackupsOutput, error) + DescribeContinuousBackupsWithContext(aws.Context, *dynamodb.DescribeContinuousBackupsInput, ...request.Option) (*dynamodb.DescribeContinuousBackupsOutput, error) + DescribeContinuousBackupsRequest(*dynamodb.DescribeContinuousBackupsInput) (*request.Request, *dynamodb.DescribeContinuousBackupsOutput) + + DescribeContributorInsights(*dynamodb.DescribeContributorInsightsInput) (*dynamodb.DescribeContributorInsightsOutput, error) + DescribeContributorInsightsWithContext(aws.Context, *dynamodb.DescribeContributorInsightsInput, ...request.Option) (*dynamodb.DescribeContributorInsightsOutput, error) + DescribeContributorInsightsRequest(*dynamodb.DescribeContributorInsightsInput) (*request.Request, *dynamodb.DescribeContributorInsightsOutput) + + DescribeEndpoints(*dynamodb.DescribeEndpointsInput) (*dynamodb.DescribeEndpointsOutput, error) + DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error) + DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput) + + DescribeExport(*dynamodb.DescribeExportInput) (*dynamodb.DescribeExportOutput, error) + DescribeExportWithContext(aws.Context, *dynamodb.DescribeExportInput, ...request.Option) (*dynamodb.DescribeExportOutput, error) + DescribeExportRequest(*dynamodb.DescribeExportInput) (*request.Request, *dynamodb.DescribeExportOutput) + + DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error) + DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error) + DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput) + + DescribeGlobalTableSettings(*dynamodb.DescribeGlobalTableSettingsInput) (*dynamodb.DescribeGlobalTableSettingsOutput, error) + DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, ...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error) + DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput) + + DescribeImport(*dynamodb.DescribeImportInput) (*dynamodb.DescribeImportOutput, error) + DescribeImportWithContext(aws.Context, *dynamodb.DescribeImportInput, ...request.Option) (*dynamodb.DescribeImportOutput, error) + DescribeImportRequest(*dynamodb.DescribeImportInput) (*request.Request, *dynamodb.DescribeImportOutput) + + DescribeKinesisStreamingDestination(*dynamodb.DescribeKinesisStreamingDestinationInput) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) + DescribeKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DescribeKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) + DescribeKinesisStreamingDestinationRequest(*dynamodb.DescribeKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DescribeKinesisStreamingDestinationOutput) + + DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error) + DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error) + DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput) + + 
DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) + DescribeTableWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.Option) (*dynamodb.DescribeTableOutput, error) + DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput) + + DescribeTableReplicaAutoScaling(*dynamodb.DescribeTableReplicaAutoScalingInput) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error) + DescribeTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.DescribeTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error) + DescribeTableReplicaAutoScalingRequest(*dynamodb.DescribeTableReplicaAutoScalingInput) (*request.Request, *dynamodb.DescribeTableReplicaAutoScalingOutput) + + DescribeTimeToLive(*dynamodb.DescribeTimeToLiveInput) (*dynamodb.DescribeTimeToLiveOutput, error) + DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) + DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput) + + DisableKinesisStreamingDestination(*dynamodb.DisableKinesisStreamingDestinationInput) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) + DisableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DisableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) + DisableKinesisStreamingDestinationRequest(*dynamodb.DisableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DisableKinesisStreamingDestinationOutput) + + EnableKinesisStreamingDestination(*dynamodb.EnableKinesisStreamingDestinationInput) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) + EnableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.EnableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) + EnableKinesisStreamingDestinationRequest(*dynamodb.EnableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.EnableKinesisStreamingDestinationOutput) + + ExecuteStatement(*dynamodb.ExecuteStatementInput) (*dynamodb.ExecuteStatementOutput, error) + ExecuteStatementWithContext(aws.Context, *dynamodb.ExecuteStatementInput, ...request.Option) (*dynamodb.ExecuteStatementOutput, error) + ExecuteStatementRequest(*dynamodb.ExecuteStatementInput) (*request.Request, *dynamodb.ExecuteStatementOutput) + + ExecuteTransaction(*dynamodb.ExecuteTransactionInput) (*dynamodb.ExecuteTransactionOutput, error) + ExecuteTransactionWithContext(aws.Context, *dynamodb.ExecuteTransactionInput, ...request.Option) (*dynamodb.ExecuteTransactionOutput, error) + ExecuteTransactionRequest(*dynamodb.ExecuteTransactionInput) (*request.Request, *dynamodb.ExecuteTransactionOutput) + + ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, ...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput) + + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) + GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error) + GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) + + 
ImportTable(*dynamodb.ImportTableInput) (*dynamodb.ImportTableOutput, error) + ImportTableWithContext(aws.Context, *dynamodb.ImportTableInput, ...request.Option) (*dynamodb.ImportTableOutput, error) + ImportTableRequest(*dynamodb.ImportTableInput) (*request.Request, *dynamodb.ImportTableOutput) + + ListBackups(*dynamodb.ListBackupsInput) (*dynamodb.ListBackupsOutput, error) + ListBackupsWithContext(aws.Context, *dynamodb.ListBackupsInput, ...request.Option) (*dynamodb.ListBackupsOutput, error) + ListBackupsRequest(*dynamodb.ListBackupsInput) (*request.Request, *dynamodb.ListBackupsOutput) + + ListContributorInsights(*dynamodb.ListContributorInsightsInput) (*dynamodb.ListContributorInsightsOutput, error) + ListContributorInsightsWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, ...request.Option) (*dynamodb.ListContributorInsightsOutput, error) + ListContributorInsightsRequest(*dynamodb.ListContributorInsightsInput) (*request.Request, *dynamodb.ListContributorInsightsOutput) + + ListContributorInsightsPages(*dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool) error + ListContributorInsightsPagesWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool, ...request.Option) error + + ListExports(*dynamodb.ListExportsInput) (*dynamodb.ListExportsOutput, error) + ListExportsWithContext(aws.Context, *dynamodb.ListExportsInput, ...request.Option) (*dynamodb.ListExportsOutput, error) + ListExportsRequest(*dynamodb.ListExportsInput) (*request.Request, *dynamodb.ListExportsOutput) + + ListExportsPages(*dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool) error + ListExportsPagesWithContext(aws.Context, *dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool, ...request.Option) error + + ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error) + ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error) + ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput) + + ListImports(*dynamodb.ListImportsInput) (*dynamodb.ListImportsOutput, error) + ListImportsWithContext(aws.Context, *dynamodb.ListImportsInput, ...request.Option) (*dynamodb.ListImportsOutput, error) + ListImportsRequest(*dynamodb.ListImportsInput) (*request.Request, *dynamodb.ListImportsOutput) + + ListImportsPages(*dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool) error + ListImportsPagesWithContext(aws.Context, *dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool, ...request.Option) error + + ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) + ListTablesWithContext(aws.Context, *dynamodb.ListTablesInput, ...request.Option) (*dynamodb.ListTablesOutput, error) + ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput) + + ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error + ListTablesPagesWithContext(aws.Context, *dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool, ...request.Option) error + + ListTagsOfResource(*dynamodb.ListTagsOfResourceInput) (*dynamodb.ListTagsOfResourceOutput, error) + ListTagsOfResourceWithContext(aws.Context, *dynamodb.ListTagsOfResourceInput, ...request.Option) (*dynamodb.ListTagsOfResourceOutput, error) + 
ListTagsOfResourceRequest(*dynamodb.ListTagsOfResourceInput) (*request.Request, *dynamodb.ListTagsOfResourceOutput) + + PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) + PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error) + PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) + + Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) + QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error) + QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) + + QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error + QueryPagesWithContext(aws.Context, *dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool, ...request.Option) error + + RestoreTableFromBackup(*dynamodb.RestoreTableFromBackupInput) (*dynamodb.RestoreTableFromBackupOutput, error) + RestoreTableFromBackupWithContext(aws.Context, *dynamodb.RestoreTableFromBackupInput, ...request.Option) (*dynamodb.RestoreTableFromBackupOutput, error) + RestoreTableFromBackupRequest(*dynamodb.RestoreTableFromBackupInput) (*request.Request, *dynamodb.RestoreTableFromBackupOutput) + + RestoreTableToPointInTime(*dynamodb.RestoreTableToPointInTimeInput) (*dynamodb.RestoreTableToPointInTimeOutput, error) + RestoreTableToPointInTimeWithContext(aws.Context, *dynamodb.RestoreTableToPointInTimeInput, ...request.Option) (*dynamodb.RestoreTableToPointInTimeOutput, error) + RestoreTableToPointInTimeRequest(*dynamodb.RestoreTableToPointInTimeInput) (*request.Request, *dynamodb.RestoreTableToPointInTimeOutput) + + Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) + ScanWithContext(aws.Context, *dynamodb.ScanInput, ...request.Option) (*dynamodb.ScanOutput, error) + ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput) + + ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error + ScanPagesWithContext(aws.Context, *dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool, ...request.Option) error + + TagResource(*dynamodb.TagResourceInput) (*dynamodb.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *dynamodb.TagResourceInput, ...request.Option) (*dynamodb.TagResourceOutput, error) + TagResourceRequest(*dynamodb.TagResourceInput) (*request.Request, *dynamodb.TagResourceOutput) + + TransactGetItems(*dynamodb.TransactGetItemsInput) (*dynamodb.TransactGetItemsOutput, error) + TransactGetItemsWithContext(aws.Context, *dynamodb.TransactGetItemsInput, ...request.Option) (*dynamodb.TransactGetItemsOutput, error) + TransactGetItemsRequest(*dynamodb.TransactGetItemsInput) (*request.Request, *dynamodb.TransactGetItemsOutput) + + TransactWriteItems(*dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) + TransactWriteItemsWithContext(aws.Context, *dynamodb.TransactWriteItemsInput, ...request.Option) (*dynamodb.TransactWriteItemsOutput, error) + TransactWriteItemsRequest(*dynamodb.TransactWriteItemsInput) (*request.Request, *dynamodb.TransactWriteItemsOutput) + + UntagResource(*dynamodb.UntagResourceInput) (*dynamodb.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *dynamodb.UntagResourceInput, ...request.Option) (*dynamodb.UntagResourceOutput, error) + UntagResourceRequest(*dynamodb.UntagResourceInput) (*request.Request, *dynamodb.UntagResourceOutput) + + UpdateContinuousBackups(*dynamodb.UpdateContinuousBackupsInput) (*dynamodb.UpdateContinuousBackupsOutput, error) + 
UpdateContinuousBackupsWithContext(aws.Context, *dynamodb.UpdateContinuousBackupsInput, ...request.Option) (*dynamodb.UpdateContinuousBackupsOutput, error) + UpdateContinuousBackupsRequest(*dynamodb.UpdateContinuousBackupsInput) (*request.Request, *dynamodb.UpdateContinuousBackupsOutput) + + UpdateContributorInsights(*dynamodb.UpdateContributorInsightsInput) (*dynamodb.UpdateContributorInsightsOutput, error) + UpdateContributorInsightsWithContext(aws.Context, *dynamodb.UpdateContributorInsightsInput, ...request.Option) (*dynamodb.UpdateContributorInsightsOutput, error) + UpdateContributorInsightsRequest(*dynamodb.UpdateContributorInsightsInput) (*request.Request, *dynamodb.UpdateContributorInsightsOutput) + + UpdateGlobalTable(*dynamodb.UpdateGlobalTableInput) (*dynamodb.UpdateGlobalTableOutput, error) + UpdateGlobalTableWithContext(aws.Context, *dynamodb.UpdateGlobalTableInput, ...request.Option) (*dynamodb.UpdateGlobalTableOutput, error) + UpdateGlobalTableRequest(*dynamodb.UpdateGlobalTableInput) (*request.Request, *dynamodb.UpdateGlobalTableOutput) + + UpdateGlobalTableSettings(*dynamodb.UpdateGlobalTableSettingsInput) (*dynamodb.UpdateGlobalTableSettingsOutput, error) + UpdateGlobalTableSettingsWithContext(aws.Context, *dynamodb.UpdateGlobalTableSettingsInput, ...request.Option) (*dynamodb.UpdateGlobalTableSettingsOutput, error) + UpdateGlobalTableSettingsRequest(*dynamodb.UpdateGlobalTableSettingsInput) (*request.Request, *dynamodb.UpdateGlobalTableSettingsOutput) + + UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) + UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error) + UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) + + UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) + UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error) + UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) + + UpdateTableReplicaAutoScaling(*dynamodb.UpdateTableReplicaAutoScalingInput) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error) + UpdateTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.UpdateTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error) + UpdateTableReplicaAutoScalingRequest(*dynamodb.UpdateTableReplicaAutoScalingInput) (*request.Request, *dynamodb.UpdateTableReplicaAutoScalingOutput) + + UpdateTimeToLive(*dynamodb.UpdateTimeToLiveInput) (*dynamodb.UpdateTimeToLiveOutput, error) + UpdateTimeToLiveWithContext(aws.Context, *dynamodb.UpdateTimeToLiveInput, ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error) + UpdateTimeToLiveRequest(*dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput) + + WaitUntilTableExists(*dynamodb.DescribeTableInput) error + WaitUntilTableExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error + + WaitUntilTableNotExists(*dynamodb.DescribeTableInput) error + WaitUntilTableNotExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error +} + +var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go new file mode 100644 index 000000000..d3af3262e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ 
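The DynamoDBAPI interface above exists so callers can depend on an abstraction instead of the concrete *dynamodb.DynamoDB client, which makes unit testing straightforward. A minimal mocking sketch, assuming the interface lives at its conventional dynamodbiface import path; the "example" table name and fixture item are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
)

// mockDynamoDB satisfies DynamoDBAPI by embedding the interface; any
// method that is not overridden panics if called (the embedded
// interface value is nil), which surfaces unexpected calls in tests.
type mockDynamoDB struct {
	dynamodbiface.DynamoDBAPI
}

// GetItem returns a canned item instead of calling the service.
func (m *mockDynamoDB) GetItem(in *dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) {
	return &dynamodb.GetItemOutput{
		Item: map[string]*dynamodb.AttributeValue{
			"id": {S: aws.String("fixture-1")},
		},
	}, nil
}

func main() {
	var api dynamodbiface.DynamoDBAPI = &mockDynamoDB{}
	out, err := api.GetItem(&dynamodb.GetItemInput{TableName: aws.String("example")})
	fmt.Println(out, err)
}
```

The same pattern works for any subset of the interface: embed, then override only the methods the code under test exercises.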
-0,0 +1,388 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeBackupInUseException for service response error code + // "BackupInUseException". + // + // There is another ongoing conflicting backup control plane operation on the + // table. The backup is either being created, deleted or restored to a table. + ErrCodeBackupInUseException = "BackupInUseException" + + // ErrCodeBackupNotFoundException for service response error code + // "BackupNotFoundException". + // + // Backup not found for the given BackupARN. + ErrCodeBackupNotFoundException = "BackupNotFoundException" + + // ErrCodeConditionalCheckFailedException for service response error code + // "ConditionalCheckFailedException". + // + // A condition specified in the operation could not be evaluated. + ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException" + + // ErrCodeContinuousBackupsUnavailableException for service response error code + // "ContinuousBackupsUnavailableException". + // + // Backups have not yet been enabled for this table. + ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" + + // ErrCodeDuplicateItemException for service response error code + // "DuplicateItemException". + // + // There was an attempt to insert an item with the same primary key as an item + // that already exists in the DynamoDB table. + ErrCodeDuplicateItemException = "DuplicateItemException" + + // ErrCodeExportConflictException for service response error code + // "ExportConflictException". + // + // There was a conflict when writing to the specified S3 bucket. + ErrCodeExportConflictException = "ExportConflictException" + + // ErrCodeExportNotFoundException for service response error code + // "ExportNotFoundException". + // + // The specified export was not found. + ErrCodeExportNotFoundException = "ExportNotFoundException" + + // ErrCodeGlobalTableAlreadyExistsException for service response error code + // "GlobalTableAlreadyExistsException". + // + // The specified global table already exists. + ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException" + + // ErrCodeGlobalTableNotFoundException for service response error code + // "GlobalTableNotFoundException". + // + // The specified global table does not exist. + ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException" + + // ErrCodeIdempotentParameterMismatchException for service response error code + // "IdempotentParameterMismatchException". + // + // DynamoDB rejected the request because you retried a request with a different + // payload but with an idempotent token that was already used. + ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" + + // ErrCodeImportConflictException for service response error code + // "ImportConflictException". + // + // There was a conflict when importing from the specified S3 source. This can + // occur when the current import conflicts with a previous import request that + // had the same client token. + ErrCodeImportConflictException = "ImportConflictException" + + // ErrCodeImportNotFoundException for service response error code + // "ImportNotFoundException". + // + // The specified import was not found. + ErrCodeImportNotFoundException = "ImportNotFoundException" + + // ErrCodeIndexNotFoundException for service response error code + // "IndexNotFoundException". 
+ // + // The operation tried to access a nonexistent index. + ErrCodeIndexNotFoundException = "IndexNotFoundException" + + // ErrCodeInternalServerError for service response error code + // "InternalServerError". + // + // An error occurred on the server side. + ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeInvalidExportTimeException for service response error code + // "InvalidExportTimeException". + // + // The specified ExportTime is outside of the point in time recovery window. + ErrCodeInvalidExportTimeException = "InvalidExportTimeException" + + // ErrCodeInvalidRestoreTimeException for service response error code + // "InvalidRestoreTimeException". + // + // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime + // and LatestRestorableDateTime. + ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException" + + // ErrCodeItemCollectionSizeLimitExceededException for service response error code + // "ItemCollectionSizeLimitExceededException". + // + // An item collection is too large. This exception is only returned for tables + // that have one or more local secondary indexes. + ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // There is no limit to the number of daily on-demand backups that can be taken. + // + // For most purposes, up to 500 simultaneous table operations are allowed per + // account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, + // RestoreTableFromBackup, and RestoreTableToPointInTime. + // + // When you are creating a table with one or more secondary indexes, you can + // have up to 250 such requests running at a time. However, if the table or + // index specifications are complex, then DynamoDB might temporarily reduce + // the number of concurrent operations. + // + // When importing into DynamoDB, up to 50 simultaneous import table operations + // are allowed per account. + // + // There is a soft account quota of 2,500 tables. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodePointInTimeRecoveryUnavailableException for service response error code + // "PointInTimeRecoveryUnavailableException". + // + // Point in time recovery has not yet been enabled for this source table. + ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException" + + // ErrCodeProvisionedThroughputExceededException for service response error code + // "ProvisionedThroughputExceededException". + // + // Your request rate is too high. The Amazon Web Services SDKs for DynamoDB + // automatically retry requests that receive this exception. Your request is + // eventually successful, unless your retry queue is too large to finish. Reduce + // the frequency of requests and use exponential backoff. For more information, + // go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) + // in the Amazon DynamoDB Developer Guide. + ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException" + + // ErrCodeReplicaAlreadyExistsException for service response error code + // "ReplicaAlreadyExistsException". + // + // The specified replica is already part of the global table.
+ ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException" + + // ErrCodeReplicaNotFoundException for service response error code + // "ReplicaNotFoundException". + // + // The specified replica is no longer part of the global table. + ErrCodeReplicaNotFoundException = "ReplicaNotFoundException" + + // ErrCodeRequestLimitExceeded for service response error code + // "RequestLimitExceeded". + // + // Throughput exceeds the current throughput quota for your account. Please + // contact Amazon Web Services Support (https://aws.amazon.com/support) to request + // a quota increase. + ErrCodeRequestLimitExceeded = "RequestLimitExceeded" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // The operation conflicts with the resource's availability. For example, you + // attempted to recreate an existing table, or tried to delete a table currently + // in the CREATING state. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The operation tried to access a nonexistent table or index. The resource + // might not be specified correctly, or its status might not be ACTIVE. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeTableAlreadyExistsException for service response error code + // "TableAlreadyExistsException". + // + // A target table with the specified name already exists. + ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException" + + // ErrCodeTableInUseException for service response error code + // "TableInUseException". + // + // A target table with the specified name is either being created or deleted. + ErrCodeTableInUseException = "TableInUseException" + + // ErrCodeTableNotFoundException for service response error code + // "TableNotFoundException". + // + // A source table with the name TableName does not currently exist within the + // subscriber's account, or the subscriber is operating in the wrong Amazon Web + // Services Region. + ErrCodeTableNotFoundException = "TableNotFoundException" + + // ErrCodeTransactionCanceledException for service response error code + // "TransactionCanceledException". + // + // The entire transaction request was canceled. + // + // DynamoDB cancels a TransactWriteItems request under the following circumstances: + // + // * A condition in one of the condition expressions is not met. + // + // * A table in the TransactWriteItems request is in a different account + // or region. + // + // * More than one action in the TransactWriteItems operation targets the + // same item. + // + // * There is insufficient provisioned capacity for the transaction to be + // completed. + // + // * An item size becomes too large (larger than 400 KB), or a local secondary + // index (LSI) becomes too large, or a similar validation error occurs because + // of changes made by the transaction. + // + // * There is a user error, such as an invalid data format. + // + // DynamoDB cancels a TransactGetItems request under the following circumstances: + // + // * There is an ongoing TransactGetItems operation that conflicts with a + // concurrent PutItem, UpdateItem, DeleteItem, or TransactWriteItems request. + // In this case, the TransactGetItems operation fails with a TransactionCanceledException. + // + // * A table in the TransactGetItems request is in a different account or + // region.
+ // + // * There is insufficient provisioned capacity for the transaction to be + // completed. + // + // * There is a user error, such as an invalid data format. + // + // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons + // property. This property is not set for other languages. Transaction cancellation + // reasons are ordered in the order of the requested items; if an item has no error, + // it will have a None code and a null message. + // + // Cancellation reason codes and possible error messages: + // + // * No Errors: Code: None Message: null + // + // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The + // conditional request failed. + // + // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded + // Message: Collection size exceeded. + // + // * Transaction Conflict: Code: TransactionConflict Message: Transaction + // is ongoing for the item. + // + // * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded + // Messages: The level of configured provisioned throughput for the table + // was exceeded. Consider increasing your provisioning level with the UpdateTable + // API. This message is returned when provisioned throughput is exceeded + // on a provisioned DynamoDB table. The level of configured provisioned + // throughput for one or more global secondary indexes of the table was exceeded. + // Consider increasing your provisioning level for the under-provisioned + // global secondary indexes with the UpdateTable API. This message is returned + // when provisioned throughput is exceeded on a provisioned GSI. + // + // * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds + // the current capacity of your table or index. DynamoDB is automatically + // scaling your table or index so please try again shortly. If exceptions + // persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. + // This message is returned when writes get throttled on an On-Demand table + // as DynamoDB is automatically scaling the table. Throughput exceeds the + // current capacity for one or more global secondary indexes. DynamoDB is + // automatically scaling your index so please try again shortly. This message + // is returned when writes get throttled on an On-Demand GSI as DynamoDB + // is automatically scaling the GSI. + // + // * Validation Error: Code: ValidationError Messages: One or more parameter + // values were invalid. The update expression attempted to update the secondary + // index key beyond allowed size limits. The update expression attempted + // to update the secondary index key to an unsupported type. An operand in the + // update expression has an incorrect data type. Item size to update has + // exceeded the maximum allowed size. Number overflow. Attempting to store + // a number with a magnitude larger than the supported range. Type mismatch for + // attribute to update. Nesting levels have exceeded supported limits. The + // document path provided in the update expression is invalid for update. + // The provided expression refers to an attribute that does not exist in + // the item. + ErrCodeTransactionCanceledException = "TransactionCanceledException" + + // ErrCodeTransactionConflictException for service response error code + // "TransactionConflictException". + // + // The operation was rejected because there is an ongoing transaction for the item.
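+	// An illustrative detection sketch (not part of the generated file):
+	// callers typically match this code via the SDK's awserr package, e.g.
+	//
+	//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeTransactionConflictException {
+	//		// back off, then retry the conflicting write
+	//	}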
+ ErrCodeTransactionConflictException = "TransactionConflictException" + + // ErrCodeTransactionInProgressException for service response error code + // "TransactionInProgressException". + // + // The transaction with the given request token is already in progress. + // + // Recommended Settings + // + // This is a general recommendation for handling the TransactionInProgressException. + // These settings help ensure that the client retries will trigger completion + // of the ongoing TransactWriteItems request. + // + // * Set clientExecutionTimeout to a value that allows at least one retry + // to be processed after 5 seconds have elapsed since the first attempt for + // the TransactWriteItems operation. + // + // * Set socketTimeout to a value a little lower than the requestTimeout + // setting. + // + // * requestTimeout should be set based on the time taken for the individual + // retries of a single HTTP request for your use case, but setting it to + // 1 second or higher should work well to reduce chances of retries and TransactionInProgressException + // errors. + // + // * Use exponential backoff when retrying and tune backoff if needed. + // + // Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), + // example timeout settings based on the guidelines above are as follows: + // + // Example timeline: + // + // * 0-1000 first attempt + // + // * 1000-1500 first sleep/delay (default retry policy uses 500 ms as base + // delay for 4xx errors) + // + // * 1500-2500 second attempt + // + // * 2500-3500 second sleep/delay (500 * 2, exponential backoff) + // + // * 3500-4500 third attempt + // + // * 4500-6500 third sleep/delay (500 * 2^2) + // + // * 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds + // have elapsed since the first attempt reached TC) + ErrCodeTransactionInProgressException = "TransactionInProgressException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "BackupInUseException": newErrorBackupInUseException, + "BackupNotFoundException": newErrorBackupNotFoundException, + "ConditionalCheckFailedException": newErrorConditionalCheckFailedException, + "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException, + "DuplicateItemException": newErrorDuplicateItemException, + "ExportConflictException": newErrorExportConflictException, + "ExportNotFoundException": newErrorExportNotFoundException, + "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, + "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException, + "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, + "ImportConflictException": newErrorImportConflictException, + "ImportNotFoundException": newErrorImportNotFoundException, + "IndexNotFoundException": newErrorIndexNotFoundException, + "InternalServerError": newErrorInternalServerError, + "InvalidExportTimeException": newErrorInvalidExportTimeException, + "InvalidRestoreTimeException": newErrorInvalidRestoreTimeException, + "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException, + "LimitExceededException": newErrorLimitExceededException, + "PointInTimeRecoveryUnavailableException": newErrorPointInTimeRecoveryUnavailableException, + "ProvisionedThroughputExceededException": newErrorProvisionedThroughputExceededException, + 
"ReplicaAlreadyExistsException": newErrorReplicaAlreadyExistsException, + "ReplicaNotFoundException": newErrorReplicaNotFoundException, + "RequestLimitExceeded": newErrorRequestLimitExceeded, + "ResourceInUseException": newErrorResourceInUseException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "TableAlreadyExistsException": newErrorTableAlreadyExistsException, + "TableInUseException": newErrorTableInUseException, + "TableNotFoundException": newErrorTableNotFoundException, + "TransactionCanceledException": newErrorTransactionCanceledException, + "TransactionConflictException": newErrorTransactionConflictException, + "TransactionInProgressException": newErrorTransactionInProgressException, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go new file mode 100644 index 000000000..ce0ed7446 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -0,0 +1,112 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/crr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// DynamoDB provides the API operation methods for making requests to +// Amazon DynamoDB. See this package's package overview docs +// for details on the service. +// +// DynamoDB methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type DynamoDB struct { + *client.Client + endpointCache *crr.EndpointCache +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "dynamodb" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "DynamoDB" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the DynamoDB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a DynamoDB client from just a session. +// svc := dynamodb.New(mySession) +// +// // Create a DynamoDB client with additional configuration +// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *DynamoDB { + svc := &DynamoDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2012-08-10", + ResolvedRegion: resolvedRegion, + JSONVersion: "1.0", + TargetPrefix: "DynamoDB_20120810", + }, + handlers, + ), + } + svc.endpointCache = crr.NewEndpointCache(10) + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDB operation and runs any +// custom request initialization. +func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go new file mode 100644 index 000000000..ae515f7de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilTableExists uses the DynamoDB API operation +// DescribeTable to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { + return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists, +// with support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
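+// A usage sketch (the table name and timeout here are illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+//	defer cancel()
+//	err := svc.WaitUntilTableExistsWithContext(ctx, &dynamodb.DescribeTableInput{
+//		TableName: aws.String("example-table"),
+//	})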
+func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTableExists", + MaxAttempts: 25, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus", + Expected: "ACTIVE", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTableInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTableRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilTableNotExists uses the DynamoDB API operation +// DescribeTable to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { + return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists, +// with support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTableNotExists", + MaxAttempts: 25, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTableInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTableRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 2b7e675ab..63729d0a7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // // Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. -// These temporary credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use AssumeRole within your account -// or for cross-account access.
For a comparison of AssumeRole with other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// Amazon Web Services resources. These temporary credentials consist of an +// access key ID, a secret access key, and a security token. Typically, you +// use AssumeRole within your account or for cross-account access. For a comparison +// of AssumeRole with other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // @@ -74,16 +73,16 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // When you create a role, you create two policies: A role trust policy that @@ -307,16 +306,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. 
The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Calling AssumeRoleWithSAML does not require the use of Amazon Web Services @@ -343,11 +342,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -563,16 +563,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. 
The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // # Tags @@ -588,11 +588,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -1101,18 +1102,20 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // # Permissions // // You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: +// Amazon Web Services service with the following exceptions: // // - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. +// API. This limitation does not apply to console sessions. // // - You cannot call any STS operations except GetCallerIdentity. 
// +// You can use temporary credentials for single sign-on (SSO) to the console. +// // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass @@ -1424,11 +1427,12 @@ type AssumeRoleInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1441,11 +1445,12 @@ type AssumeRoleInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1520,11 +1525,12 @@ type AssumeRoleInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. 
// - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1843,11 +1849,12 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1860,11 +1867,12 @@ type AssumeRoleWithSAMLInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2190,11 +2198,12 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2207,11 +2216,12 @@ type AssumeRoleWithWebIdentityInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2934,8 +2944,8 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, // then the resulting federated user session has no permissions. @@ -2960,11 +2970,12 @@ type GetFederationTokenInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. 
- // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2973,11 +2984,12 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // // This parameter is optional. However, if you do not pass any session policies, @@ -2997,11 +3009,12 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -3015,11 +3028,12 @@ type GetFederationTokenInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. 
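The PackedPolicySize guidance repeated in these doc comments can be acted on from calling code: the response reports, as a percentage, how close the packed inline session policy, managed policy ARNs, and session tags are to the binary limit. A minimal sketch against the v1 aws-sdk-go STS client, where the role ARN, session name, and policy JSON are placeholder assumptions, not values from this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder role
		RoleSessionName: aws.String("example-session"),
		// Placeholder inline session policy; it counts toward the packed limit.
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	// PackedPolicySize is the percentage of the packed binary limit used.
	if out.PackedPolicySize != nil && *out.PackedPolicySize > 80 {
		fmt.Printf("packed policies and tags are at %d%% of the limit\n", *out.PackedPolicySize)
	}
}
```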
// - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. When you do, session tags override a user diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b..8bf0e5b78 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 000000000..94b9c4439 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d54..a9e0d45c9 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array for the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block.
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf7..3e8b13257 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. 
- SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 000000000..7e3145a22 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f..9216e0a40 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a82160..26df13bba 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a3..e86f1b5fd 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of 
otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e..1c1638fd8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go new file mode 100644 index 000000000..bf7671dd2 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go @@ -0,0 +1,70 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +// Package activation implements primitives for systemd socket activation. +package activation + +import ( + "os" + "strconv" + "strings" + "syscall" +) + +const ( + // listenFdsStart corresponds to `SD_LISTEN_FDS_START`. + listenFdsStart = 3 +) + +// Files returns a slice containing an `os.File` object for each +// file descriptor passed to this process via the systemd fd-passing protocol. +// +// The order of the file descriptors is preserved in the returned slice. +// `unsetEnv` is typically set to `true` in order to avoid clashes in +// fd usage and to avoid leaking environment flags to child processes.
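The Files contract described above (LISTEN_PID must match the current pid, LISTEN_FDS gives the count, LISTEN_FDNAMES the optional names, and descriptors start at SD_LISTEN_FDS_START, i.e. fd 3) is what a socket-activated daemon builds on. A minimal consumer sketch, assuming the process is started by a systemd socket unit; the HTTP server is illustrative only, and Listeners (defined in listeners.go below) wraps the Files call shown here:

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/go-systemd/v22/activation"
)

func main() {
	// Listeners calls Files(true), so the LISTEN_* variables are consumed
	// and unset, and child processes do not inherit stale activation state.
	listeners, err := activation.Listeners()
	if err != nil || len(listeners) == 0 || listeners[0] == nil {
		log.Fatal("no stream sockets passed by systemd")
	}
	// Serve on the first inherited socket.
	log.Fatal(http.Serve(listeners[0], nil))
}
```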
+func Files(unsetEnv bool) []*os.File { + if unsetEnv { + defer os.Unsetenv("LISTEN_PID") + defer os.Unsetenv("LISTEN_FDS") + defer os.Unsetenv("LISTEN_FDNAMES") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + names := strings.Split(os.Getenv("LISTEN_FDNAMES"), ":") + + files := make([]*os.File, 0, nfds) + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + name := "LISTEN_FD_" + strconv.Itoa(fd) + offset := fd - listenFdsStart + if offset < len(names) && len(names[offset]) > 0 { + name = names[offset] + } + files = append(files, os.NewFile(uintptr(fd), name)) + } + + return files +} diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go similarity index 61% rename from vendor/go.opentelemetry.io/otel/metric/unit/doc.go rename to vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go index f8e723593..d391bf00c 100644 --- a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_windows.go @@ -1,4 +1,4 @@ -// Copyright The OpenTelemetry Authors +// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package unit provides units. -// -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -package unit // import "go.opentelemetry.io/otel/metric/unit" +package activation + +import "os" + +func Files(unsetEnv bool) []*os.File { + return nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go b/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go new file mode 100644 index 000000000..3dbe2b087 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/listeners.go @@ -0,0 +1,103 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "crypto/tls" + "net" +) + +// Listeners returns a slice containing a net.Listener for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. 
For example, if systemd were to return file descriptors +// corresponding to "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener}. +func Listeners() ([]net.Listener, error) { + files := Files(true) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + if pc, err := net.FileListener(f); err == nil { + listeners[i] = pc + f.Close() + } + } + return listeners, nil +} + +// ListenersWithNames maps a listener name to a set of net.Listener instances. +func ListenersWithNames() (map[string][]net.Listener, error) { + files := Files(true) + listeners := map[string][]net.Listener{} + + for _, f := range files { + if pc, err := net.FileListener(f); err == nil { + current, ok := listeners[f.Name()] + if !ok { + listeners[f.Name()] = []net.Listener{pc} + } else { + listeners[f.Name()] = append(current, pc) + } + f.Close() + } + } + return listeners, nil +} + +// TLSListeners returns a slice containing a net.Listener for each matching TCP socket type +// passed to this process. +// It uses the default Listeners func and forces TCP socket handlers to use TLS based on tlsConfig. +func TLSListeners(tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil { + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} + +// TLSListenersWithNames maps a listener name to a net.Listener with +// the associated TLS configuration. +func TLSListenersWithNames(tlsConfig *tls.Config) (map[string][]net.Listener, error) { + listeners, err := ListenersWithNames() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil { + for _, ll := range listeners { + // Activate TLS only for TCP sockets + for i, l := range ll { + if l.Addr().Network() == "tcp" { + ll[i] = tls.NewListener(l, tlsConfig) + } + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go new file mode 100644 index 000000000..a97206785 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/activation/packetconns.go @@ -0,0 +1,38 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps.
For example, if systemd were to return file descriptors +// corresponding to "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn}. +func PacketConns() ([]net.PacketConn, error) { + files := Files(true) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + f.Close() + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go index 8d58ca0fb..c5b23a819 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows // Package journal provides write bindings to the local systemd journal. @@ -53,15 +54,9 @@ var ( onceConn sync.Once ) -func init() { - onceConn.Do(initConn) -} - // Enabled checks whether the local systemd journal is available for logging. func Enabled() bool { - onceConn.Do(initConn) - - if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + if c := getOrInitConn(); c == nil { return false } @@ -74,6 +69,58 @@ func Enabled() bool { return true } +// StderrIsJournalStream returns whether the process stderr is connected +// to the Journal's stream transport. +// +// This can be used for automatic protocol upgrading described in [Journal Native Protocol]. +// +// Returns true if the JOURNAL_STREAM environment variable is present, +// and stderr's device and inode numbers match it. +// +// An error is returned if an unexpected error occurs: e.g. if the JOURNAL_STREAM environment variable +// is present but malformed, the fstat syscall fails, etc. +// +// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading +func StderrIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stderr) +} + +// StdoutIsJournalStream returns whether the process stdout is connected +// to the Journal's stream transport. +// +// Returns true if the JOURNAL_STREAM environment variable is present, +// and stdout's device and inode numbers match it. +// +// An error is returned if an unexpected error occurs: e.g. if the JOURNAL_STREAM environment variable +// is present but malformed, the fstat syscall fails, etc. +// +// Most users should probably use [StderrIsJournalStream]. +func StdoutIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stdout) +} + +func fdIsJournalStream(fd int) (bool, error) { + journalStream := os.Getenv("JOURNAL_STREAM") + if journalStream == "" { + return false, nil + } + + var expectedStat syscall.Stat_t + _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino) + if err != nil { + return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err) + } + + var stat syscall.Stat_t + err = syscall.Fstat(fd, &stat) + if err != nil { + return false, err + } + + match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino + return match, nil +} + // Send a message to the local systemd journal. vars is a map of journald // fields to values. Fields must be composed of uppercase letters, numbers, // and underscores, but must not start with an underscore. Within these // restrictions, any arbitrary field name may be used. Some names have special // significance: see the journalctl documentation // (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) // for more details.
vars may be nil. func Send(message string, priority Priority, vars map[string]string) error { - conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + conn := getOrInitConn() if conn == nil { return errors.New("could not initialize socket to journald") } @@ -126,6 +173,16 @@ func Send(message string, priority Priority, vars map[string]string) error { return nil } +// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing it if necessary. +func getOrInitConn() *net.UnixConn { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn != nil { + return conn + } + onceConn.Do(initConn) + return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) +} + func appendVariable(w io.Writer, name, value string) { if err := validVarName(name); err != nil { fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) @@ -194,7 +251,7 @@ func tempFd() (*os.File, error) { } // initConn initializes the global `unixConnPtr` socket. -// It is meant to be called exactly once, at program startup. +// It is automatically called when needed. func initConn() { autobind, err := net.ResolveUnixAddr("unixgram", "") if err != nil { diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go index 677aca68e..322e41e74 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -33,3 +33,11 @@ func Enabled() bool { func Send(message string, priority Priority, vars map[string]string) error { return errors.New("could not initialize socket to journald") } + +func StderrIsJournalStream() (bool, error) { + return false, nil +} + +func StdoutIsJournalStream() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go new file mode 100644 index 000000000..e5eea6198 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go @@ -0,0 +1,109 @@ +// This file was taken from Prometheus (https://github.com/prometheus/prometheus). +// The original license header is included below: +// +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "io" + + "github.com/prometheus/common/model" + + "github.com/cortexproject/cortex/pkg/prom1/storage/metric" +) + +const ( + // ChunkLen is the length of a chunk in bytes. + ChunkLen = 1024 +) + +// Chunk is the interface for all chunks. Chunks are generally not +// goroutine-safe. +type Chunk interface { + // Add adds a SamplePair to the chunk, performs any necessary + // re-encoding, and creates any necessary overflow chunk. + // The returned Chunk is the overflow chunk if it was created. + // The returned Chunk is nil if the sample got appended to the same chunk.
+ Add(sample model.SamplePair) (Chunk, error) + // NewIterator returns an iterator for the chunk. + // The iterator passed as argument is for re-use. Depending on implementation, + // the iterator can be re-used or a new iterator can be allocated. + NewIterator(Iterator) Iterator + Marshal(io.Writer) error + UnmarshalFromBuf([]byte) error + Encoding() Encoding + + // Len returns the number of samples in the chunk. Implementations may be + // expensive. + Len() int + + // Equals checks if this chunk holds the same data as another. + Equals(Chunk) (bool, error) +} + +// Iterator enables efficient access to the content of a chunk. It is +// generally not safe to use an Iterator concurrently with or after chunk +// mutation. +type Iterator interface { + // Scans the next value in the chunk. Directly after the iterator has + // been created, the next value is the first value in the + // chunk. Otherwise, it is the value following the last value scanned or + // found (by one of the Find... methods). Returns false if either the + // end of the chunk is reached or an error has occurred. + Scan() bool + // Finds the oldest value at or after the provided time. Returns false + // if either the chunk contains no value at or after the provided time, + // or an error has occurred. + FindAtOrAfter(model.Time) bool + // Returns the last value scanned (by the scan method) or found (by one + // of the find... methods). It returns model.ZeroSamplePair before any of + // those methods were called. + Value() model.SamplePair + // Returns a batch of the provided size; NB not idempotent! Should only be called + // once per Scan. + Batch(size int) Batch + // Returns the last error encountered. In general, an error signals data + // corruption in the chunk and requires quarantining. + Err() error +} + +// BatchSize is samples per batch; this was chosen by benchmarking all sizes from +// 1 to 128. +const BatchSize = 12 + +// Batch is a sorted set of (timestamp, value) pairs. They are intended to be +// small, and passed by value. +type Batch struct { + Timestamps [BatchSize]int64 + Values [BatchSize]float64 + Index int + Length int +} + +// RangeValues is a utility function that retrieves all values within the given +// range from an Iterator. +func RangeValues(it Iterator, in metric.Interval) ([]model.SamplePair, error) { + result := []model.SamplePair{} + if !it.FindAtOrAfter(in.OldestInclusive) { + return result, it.Err() + } + for !it.Value().Timestamp.After(in.NewestInclusive) { + result = append(result, it.Value()) + if !it.Scan() { + break + } + } + return result, it.Err() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go new file mode 100644 index 000000000..e56c34c9e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go @@ -0,0 +1,51 @@ +package encoding + +import ( + "fmt" +) + +// Encoding defines which chunk encoding we are using. +type Encoding byte + +var ( + // DefaultEncoding is exported for use in unit tests elsewhere + DefaultEncoding = PrometheusXorChunk +) + +// String implements flag.Value. +func (e Encoding) String() string { + if known, found := encodings[e]; found { + return known.Name + } + return fmt.Sprintf("%d", e) +} + +const ( + // PrometheusXorChunk is a wrapper around Prometheus XOR-encoded chunk. + // 4 is the magic value for backwards-compatibility with previous iota-based constants.
+ PrometheusXorChunk Encoding = 4 +) + +type encoding struct { + Name string + New func() Chunk +} + +var encodings = map[Encoding]encoding{ + PrometheusXorChunk: { + Name: "PrometheusXorChunk", + New: func() Chunk { + return newPrometheusXorChunk() + }, + }, +} + +// NewForEncoding allows configuring what chunk type you want +func NewForEncoding(encoding Encoding) (Chunk, error) { + enc, ok := encodings[encoding] + if !ok { + return nil, fmt.Errorf("unknown chunk encoding: %v", encoding) + } + + return enc.New(), nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/instrumentation.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/instrumentation.go new file mode 100644 index 000000000..241b88c48 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/instrumentation.go @@ -0,0 +1,91 @@ +// This file was taken from Prometheus (https://github.com/prometheus/prometheus). +// The original license header is included below: +// +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import "github.com/prometheus/client_golang/prometheus" + +// Usually, a separate file for instrumentation is frowned upon. Metrics should +// be close to where they are used. However, the metrics below are set all over +// the place, so we go for a separate instrumentation file in this case. +var ( + Ops = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "chunk_ops_total", + Help: "The total number of chunk operations by their type.", + }, + []string{OpTypeLabel}, + ) + DescOps = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "chunkdesc_ops_total", + Help: "The total number of chunk descriptor operations by their type.", + }, + []string{OpTypeLabel}, + ) + NumMemDescs = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "memory_chunkdescs", + Help: "The current number of chunk descriptors in memory.", + }) +) + +const ( + namespace = "prometheus" + subsystem = "local_storage" + + // OpTypeLabel is the label name for chunk operation types. + OpTypeLabel = "type" + + // Op-types for ChunkOps. + + // CreateAndPin is the label value for create-and-pin chunk ops. + CreateAndPin = "create" // A Desc creation with refCount=1. + // PersistAndUnpin is the label value for persist chunk ops. + PersistAndUnpin = "persist" + // Pin is the label value for pin chunk ops (excludes pin on creation). + Pin = "pin" + // Unpin is the label value for unpin chunk ops (excludes the unpin on persisting). + Unpin = "unpin" + // Transcode is the label value for transcode chunk ops. + Transcode = "transcode" + // Drop is the label value for drop chunk ops. + Drop = "drop" + + // Op-types for ChunkOps and ChunkDescOps. + + // Evict is the label value for evict chunk desc ops. + Evict = "evict" + // Load is the label value for load chunk and chunk desc ops. 
+ Load = "load" +) + +func init() { + prometheus.MustRegister(Ops) + prometheus.MustRegister(DescOps) + prometheus.MustRegister(NumMemDescs) +} + +// NumMemChunks is the total number of chunks in memory. This is a global +// counter, also used internally, so not implemented as metrics. Collected in +// MemorySeriesStorage. +// TODO(beorn7): Having this as an exported global variable is really bad. +var NumMemChunks int64 diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/prometheus_chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/prometheus_chunk.go new file mode 100644 index 000000000..bbc4d4366 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/prometheus_chunk.go @@ -0,0 +1,140 @@ +package encoding + +import ( + "bytes" + "io" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +// Wrapper around a Prometheus chunk. +type prometheusXorChunk struct { + chunk chunkenc.Chunk +} + +func newPrometheusXorChunk() *prometheusXorChunk { + return &prometheusXorChunk{} +} + +// Add adds another sample to the chunk. While Add works, it is only implemented +// to make tests work, and should not be used in production. In particular, it appends +// all samples to a single chunk, and uses a new Appender for each Add. +func (p *prometheusXorChunk) Add(m model.SamplePair) (Chunk, error) { + if p.chunk == nil { + p.chunk = chunkenc.NewXORChunk() + } + + app, err := p.chunk.Appender() + if err != nil { + return nil, err + } + + app.Append(int64(m.Timestamp), float64(m.Value)) + return nil, nil +} + +func (p *prometheusXorChunk) NewIterator(iterator Iterator) Iterator { + if p.chunk == nil { + return errorIterator("Prometheus chunk is not set") + } + + if pit, ok := iterator.(*prometheusChunkIterator); ok { + pit.c = p.chunk + pit.it = p.chunk.Iterator(pit.it) + return pit + } + + return &prometheusChunkIterator{c: p.chunk, it: p.chunk.Iterator(nil)} +} + +func (p *prometheusXorChunk) Marshal(i io.Writer) error { + if p.chunk == nil { + return errors.New("chunk data not set") + } + _, err := i.Write(p.chunk.Bytes()) + return err +} + +func (p *prometheusXorChunk) UnmarshalFromBuf(bytes []byte) error { + c, err := chunkenc.FromData(chunkenc.EncXOR, bytes) + if err != nil { + return errors.Wrap(err, "failed to create Prometheus chunk from bytes") + } + + p.chunk = c + return nil +} + +func (p *prometheusXorChunk) Encoding() Encoding { + return PrometheusXorChunk +} + +func (p *prometheusXorChunk) Len() int { + if p.chunk == nil { + return 0 + } + return p.chunk.NumSamples() +} + +type prometheusChunkIterator struct { + c chunkenc.Chunk // we need the chunk, because FindAtOrAfter needs to start with a fresh iterator. + it chunkenc.Iterator +} + +func (p *prometheusChunkIterator) Scan() bool { + return p.it.Next() != chunkenc.ValNone +} + +func (p *prometheusChunkIterator) FindAtOrAfter(time model.Time) bool { + // FindAtOrAfter must return the OLDEST value at a given time. That means we need to start with a fresh iterator, + // otherwise we cannot guarantee OLDEST.
+ p.it = p.c.Iterator(p.it) + return p.it.Seek(int64(time)) != chunkenc.ValNone +} + +func (p *prometheusChunkIterator) Value() model.SamplePair { + ts, val := p.it.At() + return model.SamplePair{ + Timestamp: model.Time(ts), + Value: model.SampleValue(val), + } +} + +func (p *prometheusChunkIterator) Batch(size int) Batch { + var batch Batch + j := 0 + for j < size { + t, v := p.it.At() + batch.Timestamps[j] = t + batch.Values[j] = v + j++ + if j < size && p.it.Next() == chunkenc.ValNone { + break + } + } + batch.Index = 0 + batch.Length = j + return batch +} + +func (p *prometheusChunkIterator) Err() error { + return p.it.Err() +} + +func (p *prometheusXorChunk) Equals(chunk Chunk) (bool, error) { + po, ok := chunk.(*prometheusXorChunk) + if !ok { + return false, errors.New("other chunk is not a prometheusXorChunk") + } + return bytes.Equal(p.chunk.Bytes(), po.chunk.Bytes()), nil +} + +type errorIterator string + +func (e errorIterator) Scan() bool { return false } +func (e errorIterator) FindAtOrAfter(time model.Time) bool { return false } +func (e errorIterator) Value() model.SamplePair { panic("no values") } +func (e errorIterator) Batch(size int) Batch { panic("no values") } +func (e errorIterator) Err() error { return errors.New(string(e)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go index 4a88a7e7c..311430e19 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go @@ -510,51 +510,50 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 697 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4a, - 0x10, 0xf7, 0xe6, 0x7f, 0x86, 0x90, 0x67, 0xed, 0x43, 0x7a, 0x16, 0x07, 0x27, 0xf8, 0x5d, 0x72, - 0x78, 0x2f, 0x54, 0x54, 0x6d, 0xd5, 0xaa, 0xaa, 0xe4, 0x54, 0x81, 0x22, 0xc8, 0x1f, 0x6d, 0x9c, - 0xa2, 0xf6, 0x12, 0x6d, 0xc2, 0x02, 0x56, 0xed, 0xd8, 0xb5, 0xd7, 0x88, 0xdc, 0x7a, 0xea, 0xb9, - 0xe7, 0x7e, 0x82, 0x7e, 0x82, 0x4a, 0xfd, 0x06, 0x1c, 0x39, 0xa2, 0x1e, 0x50, 0x31, 0x17, 0x8e, - 0x7c, 0x84, 0xca, 0x6b, 0x27, 0x06, 0x55, 0xdc, 0xb8, 0xcd, 0xcc, 0x6f, 0x7e, 0x33, 0xb3, 0xf3, - 0x1b, 0x2d, 0x54, 0x26, 0x8e, 0xc7, 0xd9, 0x49, 0xd3, 0xf5, 0x1c, 0xee, 0xe0, 0x52, 0xec, 0xb9, - 0xe3, 0xd5, 0xff, 0x0f, 0x4d, 0x7e, 0x14, 0x8c, 0x9b, 0x13, 0xc7, 0x5e, 0x3f, 0x74, 0x0e, 0x9d, - 0x75, 0x91, 0x30, 0x0e, 0x0e, 0x84, 0x27, 0x1c, 0x61, 0xc5, 0x44, 0xed, 0x7b, 0x06, 0x2a, 0x7b, - 0x9e, 0xc9, 0x19, 0x61, 0x1f, 0x03, 0xe6, 0x73, 0xdc, 0x07, 0xe0, 0xa6, 0xcd, 0x7c, 0xe6, 0x99, - 0xcc, 0x57, 0x50, 0x3d, 0xdb, 0x58, 0xda, 0x58, 0x69, 0xce, 0xcb, 0x37, 0x0d, 0xd3, 0x66, 0x03, - 0x81, 0xb5, 0x56, 0x4f, 0x2f, 0x6a, 0xd2, 0xcf, 0x8b, 0x1a, 0xee, 0x7b, 0x8c, 0x5a, 0x96, 0x33, - 0x31, 0x16, 0x3c, 0x72, 0xab, 0x06, 0x7e, 0x0e, 0x85, 0x81, 0x13, 0x78, 0x13, 0xa6, 0x64, 0xea, - 0xa8, 0x51, 0xdd, 0x58, 0x4b, 0xab, 0xdd, 0xee, 0xdc, 0x8c, 0x93, 0xda, 0xd3, 0xc0, 0x26, 0x09, - 0x01, 0xbf, 0x80, 0x92, 0xcd, 0x38, 0xdd, 0xa7, 0x9c, 0x2a, 0x59, 0x31, 0x8a, 0x92, 0x92, 0x3b, - 0x8c, 0x7b, 0xe6, 0xa4, 0x93, 0xe0, 0xad, 0xdc, 0xe9, 0x45, 0x0d, 0x91, 0x45, 0x3e, 0x7e, 0x09, - 0xab, 0xfe, 0x07, 0xd3, 0x1d, 0x59, 0x74, 0xcc, 0xac, 0xd1, 0x94, 0xda, 0x6c, 0x74, 0x4c, 0x2d, - 0x73, 0x9f, 0x72, 0xd3, 0x99, 0x2a, 0xd7, 0xc5, 0x3a, 0x6a, 0x94, 0xc8, 0x3f, 
0x51, 0xca, 0x6e, - 0x94, 0xd1, 0xa5, 0x36, 0x7b, 0xbb, 0xc0, 0xb5, 0x1a, 0x40, 0x3a, 0x0f, 0x2e, 0x42, 0x56, 0xef, - 0x6f, 0xcb, 0x12, 0x2e, 0x41, 0x8e, 0x0c, 0x77, 0xdb, 0x32, 0xd2, 0xfe, 0x82, 0xe5, 0x64, 0x7a, - 0xdf, 0x75, 0xa6, 0x3e, 0xd3, 0x7e, 0x20, 0x80, 0x74, 0x3b, 0x58, 0x87, 0x82, 0xe8, 0x3c, 0xdf, - 0xe1, 0xdf, 0xe9, 0xe0, 0xa2, 0x5f, 0x9f, 0x9a, 0x5e, 0x6b, 0x25, 0x59, 0x61, 0x45, 0x84, 0xf4, - 0x7d, 0xea, 0x72, 0xe6, 0x91, 0x84, 0x88, 0x1f, 0x41, 0xd1, 0xa7, 0xb6, 0x6b, 0x31, 0x5f, 0xc9, - 0x88, 0x1a, 0x72, 0x5a, 0x63, 0x20, 0x00, 0xf1, 0x68, 0x89, 0xcc, 0xd3, 0xf0, 0x53, 0x28, 0xb3, - 0x13, 0x66, 0xbb, 0x16, 0xf5, 0xfc, 0x64, 0x61, 0x38, 0xe5, 0xb4, 0x13, 0x28, 0x61, 0xa5, 0xa9, - 0xda, 0x13, 0x28, 0x2f, 0x86, 0xc2, 0x18, 0x72, 0xd1, 0xb6, 0x14, 0x54, 0x47, 0x8d, 0x0a, 0x11, - 0x36, 0x5e, 0x81, 0xfc, 0x31, 0xb5, 0x82, 0x58, 0xc2, 0x0a, 0x89, 0x1d, 0x4d, 0x87, 0x42, 0x3c, - 0x47, 0x8a, 0x47, 0x24, 0x94, 0xe0, 0x78, 0x0d, 0x2a, 0xe2, 0x0e, 0x38, 0xb5, 0xdd, 0x91, 0xed, - 0x0b, 0x72, 0x96, 0x2c, 0x2d, 0x62, 0x1d, 0x5f, 0xfb, 0x9a, 0x81, 0xea, 0x5d, 0x21, 0xf1, 0x33, - 0xc8, 0xf1, 0x99, 0x1b, 0x97, 0xaa, 0x6e, 0xfc, 0x7b, 0x9f, 0xe0, 0x89, 0x6b, 0xcc, 0x5c, 0x46, - 0x04, 0x01, 0xff, 0x07, 0xd8, 0x16, 0xb1, 0xd1, 0x01, 0xb5, 0x4d, 0x6b, 0x26, 0x44, 0x17, 0x4d, - 0xcb, 0x44, 0x8e, 0x91, 0x4d, 0x01, 0x44, 0x5a, 0x47, 0xcf, 0x3c, 0x62, 0x96, 0xab, 0xe4, 0x04, - 0x2e, 0xec, 0x28, 0x16, 0x4c, 0x4d, 0xae, 0xe4, 0xe3, 0x58, 0x64, 0x6b, 0x33, 0x80, 0xb4, 0x13, - 0x5e, 0x82, 0xe2, 0xb0, 0xbb, 0xd3, 0xed, 0xed, 0x75, 0x65, 0x29, 0x72, 0x5e, 0xf7, 0x86, 0x5d, - 0xa3, 0x4d, 0x64, 0x84, 0xcb, 0x90, 0xdf, 0xd2, 0x87, 0x5b, 0x6d, 0x39, 0x83, 0x97, 0xa1, 0xfc, - 0x66, 0x7b, 0x60, 0xf4, 0xb6, 0x88, 0xde, 0x91, 0xb3, 0x18, 0x43, 0x55, 0x20, 0x69, 0x2c, 0x17, - 0x51, 0x07, 0xc3, 0x4e, 0x47, 0x27, 0xef, 0xe4, 0x7c, 0x74, 0x55, 0xdb, 0xdd, 0xcd, 0x9e, 0x5c, - 0xc0, 0x15, 0x28, 0x0d, 0x0c, 0xdd, 0x68, 0x0f, 0xda, 0x86, 0x5c, 0xd4, 0x76, 0xa0, 0x10, 0xb7, - 0x7e, 0x80, 0x6b, 0xd2, 0x3e, 0x23, 0x28, 0xcd, 0x2f, 0xe0, 0x21, 0xae, 0xf3, 0xce, 0x49, 0xdc, - 0x2b, 0x79, 0xf6, 0x0f, 0xc9, 0x5b, 0xaf, 0xce, 0x2e, 0x55, 0xe9, 0xfc, 0x52, 0x95, 0x6e, 0x2e, - 0x55, 0xf4, 0x29, 0x54, 0xd1, 0xb7, 0x50, 0x45, 0xa7, 0xa1, 0x8a, 0xce, 0x42, 0x15, 0xfd, 0x0a, - 0x55, 0x74, 0x1d, 0xaa, 0xd2, 0x4d, 0xa8, 0xa2, 0x2f, 0x57, 0xaa, 0x74, 0x76, 0xa5, 0x4a, 0xe7, - 0x57, 0xaa, 0xf4, 0x7e, 0xf1, 0xc3, 0x8d, 0x0b, 0xe2, 0xe7, 0x7a, 0xfc, 0x3b, 0x00, 0x00, 0xff, - 0xff, 0x04, 0x70, 0x7e, 0x13, 0x02, 0x05, 0x00, 0x00, + // 680 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xf6, 0xe6, 0x7f, 0xa6, 0x6e, 0x7e, 0xd6, 0xfe, 0x22, 0x61, 0xf5, 0xe0, 0xa4, 0xe6, 0x92, + 0x03, 0x0a, 0xa8, 0x08, 0x10, 0x08, 0x21, 0x39, 0xc8, 0x2d, 0x55, 0x9b, 0x3f, 0x5a, 0x3b, 0x54, + 0x70, 0x89, 0xb6, 0xe9, 0x52, 0x2c, 0xec, 0xd8, 0xd8, 0x4e, 0xd5, 0xdc, 0x38, 0x71, 0xe6, 0xcc, + 0x13, 0xf0, 0x04, 0x48, 0xbc, 0x41, 0x8f, 0x3d, 0x56, 0x1c, 0x2a, 0xea, 0x5c, 0x7a, 0xec, 0x23, + 0x20, 0xaf, 0x9d, 0xb8, 0x15, 0xea, 0xad, 0xb7, 0x99, 0xf9, 0xe6, 0x9b, 0x99, 0x9d, 0x6f, 0xb4, + 0x20, 0x8e, 0x5d, 0x3f, 0x64, 0xc7, 0x6d, 0xcf, 0x77, 0x43, 0x17, 0x57, 0x12, 0xcf, 0xdb, 0x5f, + 0xab, 0x1f, 0xba, 0x87, 0x2e, 0x0f, 0x3e, 0x8c, 0xad, 0x04, 0x57, 0x7f, 0xe6, 0x40, 0xdc, 0xf3, + 0xad, 0x90, 0x11, 0xf6, 0x79, 0xca, 0x82, 0x10, 0x0f, 0x00, 0x42, 0xcb, 0x61, 0x01, 0xf3, 0x2d, + 0x16, 0xc8, 0xa8, 0x99, 0x6f, 0xad, 0x6c, 0xd4, 0xdb, 0x8b, 0x2a, 0x6d, 0xd3, 0x72, 0x98, 0xc1, + 
0xb1, 0xce, 0xda, 0xc9, 0x79, 0x43, 0xf8, 0x7d, 0xde, 0xc0, 0x03, 0x9f, 0x51, 0xdb, 0x76, 0xc7, + 0xe6, 0x92, 0x47, 0xae, 0xd5, 0xc0, 0xcf, 0xa1, 0x64, 0xb8, 0x53, 0x7f, 0xcc, 0xe4, 0x5c, 0x13, + 0xb5, 0x6a, 0x1b, 0xeb, 0x59, 0xb5, 0xeb, 0x9d, 0xdb, 0x49, 0x92, 0x3e, 0x99, 0x3a, 0x24, 0x25, + 0xe0, 0x17, 0x50, 0x71, 0x58, 0x48, 0x0f, 0x68, 0x48, 0xe5, 0x3c, 0x1f, 0x45, 0xce, 0xc8, 0x5d, + 0x16, 0xfa, 0xd6, 0xb8, 0x9b, 0xe2, 0x9d, 0xc2, 0xc9, 0x79, 0x03, 0x91, 0x65, 0x3e, 0x7e, 0x09, + 0x6b, 0xc1, 0x27, 0xcb, 0x1b, 0xd9, 0x74, 0x9f, 0xd9, 0xa3, 0x09, 0x75, 0xd8, 0xe8, 0x88, 0xda, + 0xd6, 0x01, 0x0d, 0x2d, 0x77, 0x22, 0x5f, 0x96, 0x9b, 0xa8, 0x55, 0x21, 0xf7, 0xe2, 0x94, 0xdd, + 0x38, 0xa3, 0x47, 0x1d, 0xf6, 0x76, 0x89, 0xab, 0x0d, 0x80, 0x6c, 0x1e, 0x5c, 0x86, 0xbc, 0x36, + 0xd8, 0x96, 0x04, 0x5c, 0x81, 0x02, 0x19, 0xee, 0xea, 0x12, 0x52, 0xff, 0x83, 0xd5, 0x74, 0xfa, + 0xc0, 0x73, 0x27, 0x01, 0x53, 0x7f, 0x21, 0x80, 0x6c, 0x3b, 0x58, 0x83, 0x12, 0xef, 0xbc, 0xd8, + 0xe1, 0xff, 0xd9, 0xe0, 0xbc, 0xdf, 0x80, 0x5a, 0x7e, 0xa7, 0x9e, 0xae, 0x50, 0xe4, 0x21, 0xed, + 0x80, 0x7a, 0x21, 0xf3, 0x49, 0x4a, 0xc4, 0x8f, 0xa0, 0x1c, 0x50, 0xc7, 0xb3, 0x59, 0x20, 0xe7, + 0x78, 0x0d, 0x29, 0xab, 0x61, 0x70, 0x80, 0x3f, 0x5a, 0x20, 0x8b, 0x34, 0xfc, 0x14, 0xaa, 0xec, + 0x98, 0x39, 0x9e, 0x4d, 0xfd, 0x20, 0x5d, 0x18, 0xce, 0x38, 0x7a, 0x0a, 0xa5, 0xac, 0x2c, 0x55, + 0x7d, 0x02, 0xd5, 0xe5, 0x50, 0x18, 0x43, 0x21, 0xde, 0x96, 0x8c, 0x9a, 0xa8, 0x25, 0x12, 0x6e, + 0xe3, 0x3a, 0x14, 0x8f, 0xa8, 0x3d, 0x4d, 0x24, 0x14, 0x49, 0xe2, 0xa8, 0x1a, 0x94, 0x92, 0x39, + 0x32, 0x3c, 0x26, 0xa1, 0x14, 0xc7, 0xeb, 0x20, 0xf2, 0x3b, 0x08, 0xa9, 0xe3, 0x8d, 0x9c, 0x80, + 0x93, 0xf3, 0x64, 0x65, 0x19, 0xeb, 0x06, 0xea, 0xf7, 0x1c, 0xd4, 0x6e, 0x0a, 0x89, 0x9f, 0x41, + 0x21, 0x9c, 0x79, 0x49, 0xa9, 0xda, 0xc6, 0xfd, 0xdb, 0x04, 0x4f, 0x5d, 0x73, 0xe6, 0x31, 0xc2, + 0x09, 0xf8, 0x01, 0x60, 0x87, 0xc7, 0x46, 0x1f, 0xa8, 0x63, 0xd9, 0x33, 0x2e, 0x3a, 0x6f, 0x5a, + 0x25, 0x52, 0x82, 0x6c, 0x72, 0x20, 0xd6, 0x3a, 0x7e, 0xe6, 0x47, 0x66, 0x7b, 0x72, 0x81, 0xe3, + 0xdc, 0x8e, 0x63, 0xd3, 0x89, 0x15, 0xca, 0xc5, 0x24, 0x16, 0xdb, 0xea, 0x0c, 0x20, 0xeb, 0x84, + 0x57, 0xa0, 0x3c, 0xec, 0xed, 0xf4, 0xfa, 0x7b, 0x3d, 0x49, 0x88, 0x9d, 0xd7, 0xfd, 0x61, 0xcf, + 0xd4, 0x89, 0x84, 0x70, 0x15, 0x8a, 0x5b, 0xda, 0x70, 0x4b, 0x97, 0x72, 0x78, 0x15, 0xaa, 0x6f, + 0xb6, 0x0d, 0xb3, 0xbf, 0x45, 0xb4, 0xae, 0x94, 0xc7, 0x18, 0x6a, 0x1c, 0xc9, 0x62, 0x85, 0x98, + 0x6a, 0x0c, 0xbb, 0x5d, 0x8d, 0xbc, 0x93, 0x8a, 0xf1, 0x55, 0x6d, 0xf7, 0x36, 0xfb, 0x52, 0x09, + 0x8b, 0x50, 0x31, 0x4c, 0xcd, 0xd4, 0x0d, 0xdd, 0x94, 0xca, 0xea, 0x0e, 0x94, 0x92, 0xd6, 0x77, + 0x70, 0x4d, 0xea, 0x57, 0x04, 0x95, 0xc5, 0x05, 0xdc, 0xc5, 0x75, 0xde, 0x38, 0x89, 0x5b, 0x25, + 0xcf, 0xff, 0x23, 0x79, 0xe7, 0xd5, 0xe9, 0x85, 0x22, 0x9c, 0x5d, 0x28, 0xc2, 0xd5, 0x85, 0x82, + 0xbe, 0x44, 0x0a, 0xfa, 0x11, 0x29, 0xe8, 0x24, 0x52, 0xd0, 0x69, 0xa4, 0xa0, 0x3f, 0x91, 0x82, + 0x2e, 0x23, 0x45, 0xb8, 0x8a, 0x14, 0xf4, 0x6d, 0xae, 0x08, 0xa7, 0x73, 0x45, 0x38, 0x9b, 0x2b, + 0xc2, 0xfb, 0xe5, 0x47, 0xb6, 0x5f, 0xe2, 0x3f, 0xd7, 0xe3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x5e, 0x23, 0x91, 0x5d, 0xe9, 0x04, 0x00, 0x00, } func (x WriteRequest_SourceEnum) String() string { diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto index 632422d03..4d683fbcc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto +++ 
b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto
@@ -4,7 +4,7 @@ package cortexpb;
 
 option go_package = "cortexpb";
 
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "gogoproto/gogo.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go
new file mode 100644
index 000000000..e28d51d4f
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go
@@ -0,0 +1,61 @@
+package cortexpb
+
+import (
+	"math"
+	"sync"
+)
+
+const (
+	minPoolSizePower = 5
+)
+
+type byteSlicePools struct {
+	pools []sync.Pool
+}
+
+func newSlicePool(pools int) *byteSlicePools {
+	sp := byteSlicePools{}
+	sp.init(pools)
+	return &sp
+}
+
+func (sp *byteSlicePools) init(pools int) {
+	sp.pools = make([]sync.Pool, pools)
+	for i := 0; i < pools; i++ {
+		size := int(math.Pow(2, float64(i+minPoolSizePower)))
+		sp.pools[i] = sync.Pool{
+			New: func() interface{} {
+				buf := make([]byte, 0, size)
+				return &buf
+			},
+		}
+	}
+}
+
+func (sp *byteSlicePools) getSlice(size int) *[]byte {
+	index := int(math.Ceil(math.Log2(float64(size)))) - minPoolSizePower
+
+	if index >= len(sp.pools) {
+		buf := make([]byte, size)
+		return &buf
+	}
+
+	// if the requested size is smaller than the smallest pool size,
+	// return a slice from the first pool
+	if index < 0 {
+		index = 0
+	}
+
+	s := sp.pools[index].Get().(*[]byte)
+	*s = (*s)[:size]
+	return s
+}
+
+func (sp *byteSlicePools) reuseSlice(s *[]byte) {
+	index := int(math.Floor(math.Log2(float64(cap(*s))))) - minPoolSizePower
+
+	if index >= len(sp.pools) || index < 0 {
+		return
+	}
+
+	sp.pools[index].Put(s)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go
index 5f2a9788b..5a2fcaf85 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go
@@ -37,6 +37,15 @@ var (
 			}
 		},
 	}
+
+	writeRequestPool = sync.Pool{
+		New: func() interface{} {
+			return &PreallocWriteRequest{
+				WriteRequest: WriteRequest{},
+			}
+		},
+	}
+	bytePool = newSlicePool(20)
 )
 
 // PreallocConfig configures how structures will be preallocated to optimise
@@ -53,6 +62,7 @@ func (PreallocConfig) RegisterFlags(f *flag.FlagSet) {
 // PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal.
 type PreallocWriteRequest struct {
 	WriteRequest
+	data *[]byte
 }
 
 // Unmarshal implements proto.Message.
@@ -72,6 +82,32 @@ func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
 	return p.TimeSeries.Unmarshal(dAtA)
 }
 
+func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {
+	size := p.Size()
+	p.data = bytePool.getSlice(size)
+	dAtA = *p.data
+	n, err := p.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func ReuseWriteRequest(req *PreallocWriteRequest) {
+	if req.data != nil {
+		bytePool.reuseSlice(req.data)
+		req.data = nil
+	}
+	req.Source = 0
+	req.Metadata = nil
+	req.Timeseries = nil
+	writeRequestPool.Put(req)
+}
+
+func PreallocWriteRequestFromPool() *PreallocWriteRequest {
+	return writeRequestPool.Get().(*PreallocWriteRequest)
+}
+
 // LabelAdapter is a labels.Label that can be marshalled to/from protos.
type LabelAdapter labels.Label diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go index 6b017a20e..e6c7e334e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go @@ -1,15 +1,17 @@ package client import ( + "context" "flag" + "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/cortexproject/cortex/pkg/util/grpcclient" ) var ingesterClientRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ @@ -24,6 +26,7 @@ type HealthAndIngesterClient interface { IngesterClient grpc_health_v1.HealthClient Close() error + PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) } type closableHealthAndIngesterClient struct { @@ -32,6 +35,15 @@ type closableHealthAndIngesterClient struct { conn *grpc.ClientConn } +func (c *closableHealthAndIngesterClient) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) { + out := new(cortexpb.WriteResponse) + err := c.conn.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // MakeIngesterClient makes a new IngesterClient func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error) { dialOpts, err := cfg.GRPCClientConfig.DialOption(grpcclient.Instrument(ingesterClientRequestDuration)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go index e1c0af148..f174d6704 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go @@ -1,5 +1,11 @@ package client +import ( + "encoding/binary" + + "github.com/cortexproject/cortex/pkg/chunk/encoding" +) + // ChunksCount returns the number of chunks in response. 
func (m *QueryStreamResponse) ChunksCount() int { if len(m.Chunkseries) == 0 { @@ -27,3 +33,17 @@ func (m *QueryStreamResponse) ChunksSize() int { } return size } + +func (m *QueryStreamResponse) SamplesCount() (count int) { + for _, ts := range m.Timeseries { + count += len(ts.Samples) + } + for _, cs := range m.Chunkseries { + for _, c := range cs.Chunks { + if c.Encoding == int32(encoding.PrometheusXorChunk) { + count += int(binary.BigEndian.Uint16(c.Data)) + } + } + } + return +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go index 2a042813f..29e03f900 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go @@ -735,6 +735,7 @@ type UserStatsResponse struct { NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries,proto3" json:"num_series,omitempty"` ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` + ActiveSeries uint64 `protobuf:"varint,5,opt,name=active_series,json=activeSeries,proto3" json:"active_series,omitempty"` } func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } @@ -797,6 +798,13 @@ func (m *UserStatsResponse) GetRuleIngestionRate() float64 { return 0 } +func (m *UserStatsResponse) GetActiveSeries() uint64 { + if m != nil { + return m.ActiveSeries + } + return 0 +} + type UserIDStatsResponse struct { UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -1444,88 +1452,89 @@ func init() { func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x73, 0xd3, 0xc6, - 0x17, 0xd7, 0xc6, 0x3f, 0xb0, 0x9f, 0x1d, 0xe3, 0x6c, 0x80, 0x18, 0xf1, 0x45, 0x01, 0xcd, 0xf0, - 0x6d, 0xa6, 0x2d, 0x0e, 0xa4, 0x3f, 0x06, 0xfa, 0x8b, 0x71, 0x20, 0x40, 0x0a, 0x21, 0x20, 0x07, - 0xda, 0xe9, 0x4c, 0x47, 0x23, 0xdb, 0x8b, 0xa3, 0xa2, 0x5f, 0x48, 0xab, 0x0e, 0xf4, 0xd4, 0x99, - 0xfe, 0x01, 0xed, 0xf4, 0xd4, 0x53, 0x67, 0x7a, 0xeb, 0xb9, 0x97, 0xde, 0x7a, 0xe6, 0xc8, 0x91, - 0xe9, 0x81, 0x29, 0xe6, 0xd2, 0x23, 0xfd, 0x0f, 0x3a, 0x5a, 0xad, 0x64, 0x49, 0x91, 0x13, 0x67, - 0x86, 0xf4, 0x66, 0xbd, 0xf7, 0x79, 0x9f, 0xfd, 0xec, 0xbe, 0xb7, 0xfb, 0x9e, 0xa1, 0xa1, 0x5b, - 0x43, 0xe2, 0x51, 0xe2, 0xb6, 0x1d, 0xd7, 0xa6, 0x36, 0x2e, 0xf7, 0x6d, 0x97, 0x92, 0x47, 0xe2, - 0xd9, 0xa1, 0x4e, 0xb7, 0xfd, 0x5e, 0xbb, 0x6f, 0x9b, 0xcb, 0x43, 0x7b, 0x68, 0x2f, 0x33, 0x77, - 0xcf, 0xbf, 0xcf, 0xbe, 0xd8, 0x07, 0xfb, 0x15, 0x86, 0x89, 0x17, 0x13, 0xf0, 0x90, 0xc1, 0x71, - 0xed, 0xaf, 0x48, 0x9f, 0xf2, 0xaf, 0x65, 0xe7, 0xc1, 0x30, 0x72, 0xf4, 0xf8, 0x8f, 0x30, 0x54, - 0xfe, 0x18, 0x6a, 0x0a, 0xd1, 0x06, 0x0a, 0x79, 0xe8, 0x13, 0x8f, 0xe2, 0x36, 0x1c, 0x7a, 0xe8, - 0x13, 0x57, 0x27, 0x5e, 0x0b, 0x9d, 0x2a, 0x2c, 0xd5, 0x56, 0x8e, 0xb4, 0x39, 0xfc, 0x8e, 0x4f, - 0xdc, 0xc7, 0x1c, 0xa6, 0x44, 0x20, 0xf9, 0x12, 0xd4, 0xc3, 0x70, 0xcf, 0xb1, 0x2d, 0x8f, 0xe0, - 0x65, 0x38, 0xe4, 0x12, 0xcf, 0x37, 0x68, 0x14, 
0x7f, 0x34, 0x13, 0x1f, 0xe2, 0x94, 0x08, 0x25, - 0xff, 0x84, 0xa0, 0x9e, 0xa4, 0xc6, 0x6f, 0x03, 0xf6, 0xa8, 0xe6, 0x52, 0x95, 0xea, 0x26, 0xf1, - 0xa8, 0x66, 0x3a, 0xaa, 0x19, 0x90, 0xa1, 0xa5, 0x82, 0xd2, 0x64, 0x9e, 0xad, 0xc8, 0xb1, 0xe1, - 0xe1, 0x25, 0x68, 0x12, 0x6b, 0x90, 0xc6, 0xce, 0x30, 0x6c, 0x83, 0x58, 0x83, 0x24, 0xf2, 0x1c, - 0x54, 0x4c, 0x8d, 0xf6, 0xb7, 0x89, 0xeb, 0xb5, 0x0a, 0xe9, 0xad, 0xdd, 0xd4, 0x7a, 0xc4, 0xd8, - 0x08, 0x9d, 0x4a, 0x8c, 0x92, 0x7f, 0x41, 0x70, 0x64, 0xed, 0x11, 0x31, 0x1d, 0x43, 0x73, 0xff, - 0x13, 0x89, 0xe7, 0x77, 0x48, 0x3c, 0x9a, 0x27, 0xd1, 0x4b, 0x68, 0xbc, 0x01, 0xb3, 0xa9, 0x83, - 0xc5, 0x1f, 0x00, 0xb0, 0x95, 0xf2, 0x72, 0xe8, 0xf4, 0xda, 0xc1, 0x72, 0x5d, 0xe6, 0x5b, 0x2d, - 0x3e, 0x79, 0xbe, 0x28, 0x28, 0x09, 0xb4, 0xfc, 0x23, 0x82, 0x79, 0xc6, 0xd6, 0xa5, 0x2e, 0xd1, - 0xcc, 0x98, 0xf3, 0x12, 0xd4, 0xfa, 0xdb, 0xbe, 0xf5, 0x20, 0x45, 0xba, 0x10, 0x49, 0x1b, 0x53, - 0x5e, 0x0e, 0x40, 0x9c, 0x37, 0x19, 0x91, 0x11, 0x35, 0xb3, 0x2f, 0x51, 0x5d, 0x38, 0x9a, 0x49, - 0xc2, 0x6b, 0xd8, 0xe9, 0x1f, 0x08, 0x30, 0x3b, 0xd2, 0x7b, 0x9a, 0xe1, 0x13, 0x2f, 0x4a, 0xec, - 0x49, 0x00, 0x23, 0xb0, 0xaa, 0x96, 0x66, 0x12, 0x96, 0xd0, 0xaa, 0x52, 0x65, 0x96, 0x5b, 0x9a, - 0x49, 0x26, 0xe4, 0x7d, 0x66, 0x1f, 0x79, 0x2f, 0xec, 0x99, 0xf7, 0xe2, 0x29, 0x34, 0x4d, 0xde, - 0x2f, 0xc0, 0x7c, 0x4a, 0x3f, 0x3f, 0x93, 0xd3, 0x50, 0x0f, 0x37, 0xf0, 0x35, 0xb3, 0xb3, 0x53, - 0xa9, 0x2a, 0x35, 0x63, 0x0c, 0x95, 0x3f, 0x81, 0xe3, 0x89, 0xc8, 0x4c, 0xa6, 0xa7, 0x88, 0x7f, - 0x00, 0x73, 0x37, 0xa3, 0x13, 0xf1, 0x0e, 0xf8, 0x46, 0xc8, 0xef, 0xf1, 0x34, 0xf1, 0xc5, 0xb8, - 0xca, 0x45, 0xa8, 0x8d, 0xd3, 0x14, 0x89, 0x84, 0x38, 0x4f, 0x9e, 0xfc, 0x21, 0xb4, 0xc6, 0x61, - 0x99, 0x2d, 0xee, 0x19, 0x8c, 0xa1, 0x79, 0xd7, 0x23, 0x6e, 0x97, 0x6a, 0x34, 0xda, 0x9f, 0xfc, - 0x3b, 0x82, 0xb9, 0x84, 0x91, 0x53, 0x9d, 0x89, 0xde, 0x6f, 0xdd, 0xb6, 0x54, 0x57, 0xa3, 0x61, - 0xc9, 0x20, 0x65, 0x36, 0xb6, 0x2a, 0x1a, 0x25, 0x41, 0x55, 0x59, 0xbe, 0xa9, 0xc6, 0xd5, 0x8f, - 0x96, 0x8a, 0x4a, 0xd5, 0xf2, 0xcd, 0xb0, 0x3a, 0x83, 0xb3, 0xd3, 0x1c, 0x5d, 0xcd, 0x30, 0x15, - 0x18, 0x53, 0x53, 0x73, 0xf4, 0xf5, 0x14, 0x59, 0x1b, 0xe6, 0x5d, 0xdf, 0x20, 0x59, 0x78, 0x91, - 0xc1, 0xe7, 0x02, 0x57, 0x0a, 0x2f, 0x7f, 0x09, 0xf3, 0x81, 0xf0, 0xf5, 0x2b, 0x69, 0xe9, 0x0b, - 0x70, 0xc8, 0xf7, 0x88, 0xab, 0xea, 0x03, 0x5e, 0xe6, 0xe5, 0xe0, 0x73, 0x7d, 0x80, 0xcf, 0x42, - 0x71, 0xa0, 0x51, 0x8d, 0xc9, 0xac, 0xad, 0x1c, 0x8f, 0xea, 0x70, 0xc7, 0xe6, 0x15, 0x06, 0x93, - 0xaf, 0x01, 0x0e, 0x5c, 0x5e, 0x9a, 0xfd, 0x3c, 0x94, 0xbc, 0xc0, 0xc0, 0x6f, 0xe5, 0x89, 0x24, - 0x4b, 0x46, 0x89, 0x12, 0x22, 0xe5, 0xdf, 0x10, 0x48, 0x1b, 0x84, 0xba, 0x7a, 0xdf, 0xbb, 0x6a, - 0xbb, 0xe9, 0xb2, 0x3f, 0xe0, 0x67, 0xf7, 0x02, 0xd4, 0xa3, 0x7b, 0xa5, 0x7a, 0x84, 0xee, 0xfe, - 0xf4, 0xd6, 0x22, 0x68, 0x97, 0x50, 0xf9, 0x06, 0x2c, 0x4e, 0xd4, 0xcc, 0x8f, 0x62, 0x09, 0xca, - 0x26, 0x83, 0xf0, 0xb3, 0x68, 0x8e, 0x5f, 0xa8, 0x30, 0x54, 0xe1, 0x7e, 0xf9, 0x0e, 0x9c, 0x99, - 0x40, 0x96, 0xa9, 0xe0, 0xe9, 0x29, 0x5b, 0x70, 0x8c, 0x53, 0x6e, 0x10, 0xaa, 0x05, 0x09, 0x8b, - 0x0a, 0x7a, 0x13, 0x16, 0x76, 0x78, 0x38, 0xfd, 0xbb, 0x50, 0x31, 0xb9, 0x8d, 0x2f, 0xd0, 0xca, - 0x2e, 0x10, 0xc7, 0xc4, 0x48, 0xf9, 0x1f, 0x04, 0x87, 0x33, 0x9d, 0x20, 0x48, 0xc1, 0x7d, 0xd7, - 0x36, 0xd5, 0x68, 0xc8, 0x19, 0x57, 0x5b, 0x23, 0xb0, 0xaf, 0x73, 0xf3, 0xfa, 0x20, 0x59, 0x8e, - 0x33, 0xa9, 0x72, 0xb4, 0xa0, 0xcc, 0xae, 0x66, 0xd4, 0x10, 0xe7, 0xc7, 0x52, 0xd8, 0x11, 0xdd, - 0xd6, 0x74, 0x77, 0xb5, 0x13, 0xbc, 0xef, 0x7f, 0x3e, 0x5f, 0xdc, 0xd7, 
0x18, 0x14, 0xc6, 0x77, - 0x06, 0x9a, 0x43, 0x89, 0xab, 0xf0, 0x55, 0xf0, 0x5b, 0x50, 0x0e, 0x1b, 0x57, 0xab, 0xc8, 0xd6, - 0x9b, 0x8d, 0xaa, 0x20, 0xd9, 0xdb, 0x38, 0x44, 0xfe, 0x1e, 0x41, 0x29, 0xdc, 0xe9, 0x41, 0x95, - 0xa6, 0x08, 0x15, 0x62, 0xf5, 0xed, 0x81, 0x6e, 0x0d, 0xd9, 0x8b, 0x50, 0x52, 0xe2, 0x6f, 0x8c, - 0xf9, 0x4d, 0x0d, 0xae, 0x7e, 0x9d, 0x5f, 0xc7, 0x0e, 0xcc, 0xa6, 0x2a, 0x27, 0x35, 0xf5, 0xa0, - 0xa9, 0xa6, 0x1e, 0x15, 0xea, 0x49, 0x0f, 0x3e, 0x03, 0x45, 0xfa, 0xd8, 0x09, 0x9f, 0xb6, 0xc6, - 0xca, 0x5c, 0x14, 0xcd, 0xdc, 0x5b, 0x8f, 0x1d, 0xa2, 0x30, 0x77, 0xa0, 0x86, 0x35, 0xcd, 0x30, - 0x7d, 0xec, 0x37, 0x3e, 0x02, 0x25, 0xd6, 0x47, 0x98, 0xf4, 0xaa, 0x12, 0x7e, 0xc8, 0xdf, 0x21, - 0x68, 0x8c, 0x2b, 0xe5, 0xaa, 0x6e, 0x90, 0xd7, 0x51, 0x28, 0x22, 0x54, 0xee, 0xeb, 0x06, 0x61, - 0x1a, 0xc2, 0xe5, 0xe2, 0xef, 0xbc, 0x93, 0x7a, 0xf3, 0x53, 0xa8, 0xc6, 0x5b, 0xc0, 0x55, 0x28, - 0xad, 0xdd, 0xb9, 0xdb, 0xb9, 0xd9, 0x14, 0xf0, 0x2c, 0x54, 0x6f, 0x6d, 0x6e, 0xa9, 0xe1, 0x27, - 0xc2, 0x87, 0xa1, 0xa6, 0xac, 0x5d, 0x5b, 0xfb, 0x5c, 0xdd, 0xe8, 0x6c, 0x5d, 0xbe, 0xde, 0x9c, - 0xc1, 0x18, 0x1a, 0xa1, 0xe1, 0xd6, 0x26, 0xb7, 0x15, 0x56, 0x7e, 0xae, 0x40, 0x25, 0xd2, 0x88, - 0x2f, 0x42, 0xf1, 0xb6, 0xef, 0x6d, 0xe3, 0x63, 0xe3, 0x4a, 0xfd, 0xcc, 0xd5, 0x29, 0xe1, 0x37, - 0x4f, 0x5c, 0xd8, 0x61, 0x0f, 0xef, 0x9d, 0x2c, 0xe0, 0xf7, 0xa1, 0xc4, 0x46, 0x1c, 0x9c, 0x3b, - 0x74, 0x8b, 0xf9, 0xa3, 0xb4, 0x2c, 0xe0, 0x2b, 0x50, 0x4b, 0x8c, 0x6d, 0x13, 0xa2, 0x4f, 0xa4, - 0xac, 0xe9, 0x27, 0x45, 0x16, 0xce, 0x21, 0xbc, 0x09, 0x0d, 0xe6, 0x8a, 0xa6, 0x2d, 0x0f, 0xff, - 0x2f, 0x0a, 0xc9, 0x9b, 0x82, 0xc5, 0x93, 0x13, 0xbc, 0xb1, 0xac, 0xeb, 0x50, 0x4b, 0x4c, 0x1a, - 0x58, 0x4c, 0x15, 0x5e, 0x6a, 0xf0, 0x1a, 0x8b, 0xcb, 0x19, 0x6a, 0x64, 0x01, 0xdf, 0xe3, 0x33, - 0x47, 0x72, 0x66, 0xd9, 0x95, 0xef, 0x74, 0x8e, 0x2f, 0x67, 0xcb, 0x6b, 0x00, 0xe3, 0x39, 0x01, - 0x1f, 0x4f, 0x05, 0x25, 0xe7, 0x1b, 0x51, 0xcc, 0x73, 0xc5, 0xf2, 0xba, 0xd0, 0xcc, 0x8e, 0x1b, - 0xbb, 0x91, 0x9d, 0xda, 0xe9, 0xca, 0xd1, 0xb6, 0x0a, 0xd5, 0xb8, 0xe9, 0xe2, 0x56, 0x4e, 0x1f, - 0x0e, 0xc9, 0x26, 0x77, 0x68, 0x59, 0xc0, 0x57, 0xa1, 0xde, 0x31, 0x8c, 0x69, 0x68, 0xc4, 0xa4, - 0xc7, 0xcb, 0xf2, 0x18, 0x71, 0xb7, 0xc8, 0xb6, 0x26, 0xfc, 0xff, 0xf8, 0x41, 0xd8, 0xb5, 0x79, - 0x8b, 0x6f, 0xec, 0x89, 0x8b, 0x57, 0xfb, 0x06, 0x4e, 0xee, 0xda, 0x08, 0xa7, 0x5e, 0xf3, 0xec, - 0x1e, 0xb8, 0x9c, 0x53, 0xdf, 0x82, 0xc3, 0x99, 0xbe, 0x88, 0xa5, 0x0c, 0x4b, 0xa6, 0x95, 0x8a, - 0x8b, 0x13, 0xfd, 0x11, 0xef, 0xea, 0x47, 0x4f, 0x5f, 0x48, 0xc2, 0xb3, 0x17, 0x92, 0xf0, 0xea, - 0x85, 0x84, 0xbe, 0x1d, 0x49, 0xe8, 0xd7, 0x91, 0x84, 0x9e, 0x8c, 0x24, 0xf4, 0x74, 0x24, 0xa1, - 0xbf, 0x46, 0x12, 0xfa, 0x7b, 0x24, 0x09, 0xaf, 0x46, 0x12, 0xfa, 0xe1, 0xa5, 0x24, 0x3c, 0x7d, - 0x29, 0x09, 0xcf, 0x5e, 0x4a, 0xc2, 0x17, 0xe5, 0xbe, 0xa1, 0x13, 0x8b, 0xf6, 0xca, 0xec, 0x9f, - 0xfa, 0x3b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x93, 0x83, 0xdf, 0x59, 0x2d, 0x10, 0x00, 0x00, + // 1304 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0xd4, 0xd6, + 0x13, 0xf7, 0x4b, 0x76, 0x97, 0xec, 0xec, 0x66, 0xd9, 0xbc, 0x04, 0xb2, 0x98, 0x2f, 0x0e, 0xf8, + 0x2b, 0xda, 0xa8, 0x2d, 0x09, 0xa4, 0x3f, 0x04, 0xfd, 0x85, 0x12, 0x08, 0x90, 0x42, 0x08, 0x38, + 0x81, 0x56, 0x95, 0x2a, 0xcb, 0xd9, 0x7d, 0x24, 0x2e, 0xfe, 0x85, 0xfd, 0x8c, 0xa0, 0xa7, 0x4a, + 0xfd, 0x03, 0x5a, 0xf5, 0xd4, 0x53, 0xa5, 0xde, 0x7a, 0xee, 0x1f, 0xd0, 0x33, 0x47, 0x8e, 0xa8, + 0xaa, 0x50, 0x59, 0x2e, 0x3d, 0xd2, 0xff, 0xa0, 
0xf2, 0xfb, 0xe1, 0xb5, 0x1d, 0x6f, 0x12, 0x24, + 0xe8, 0x6d, 0x3d, 0xf3, 0x99, 0xcf, 0xcc, 0x9b, 0x99, 0xf7, 0x66, 0x16, 0x5a, 0xb6, 0xb7, 0x45, + 0x22, 0x4a, 0xc2, 0xb9, 0x20, 0xf4, 0xa9, 0x8f, 0x6b, 0x5d, 0x3f, 0xa4, 0xe4, 0x81, 0x3a, 0xb5, + 0xe5, 0x6f, 0xf9, 0x4c, 0x34, 0x9f, 0xfc, 0xe2, 0x5a, 0xf5, 0xdc, 0x96, 0x4d, 0xb7, 0xe3, 0xcd, + 0xb9, 0xae, 0xef, 0xce, 0x73, 0x60, 0x10, 0xfa, 0x5f, 0x93, 0x2e, 0x15, 0x5f, 0xf3, 0xc1, 0xdd, + 0x2d, 0xa9, 0xd8, 0x14, 0x3f, 0xb8, 0xa9, 0xfe, 0x09, 0x34, 0x0c, 0x62, 0xf5, 0x0c, 0x72, 0x2f, + 0x26, 0x11, 0xc5, 0x73, 0x70, 0xe0, 0x5e, 0x4c, 0x42, 0x9b, 0x44, 0x1d, 0x74, 0x7c, 0x74, 0xb6, + 0xb1, 0x30, 0x35, 0x27, 0xe0, 0x37, 0x63, 0x12, 0x3e, 0x14, 0x30, 0x43, 0x82, 0xf4, 0xf3, 0xd0, + 0xe4, 0xe6, 0x51, 0xe0, 0x7b, 0x11, 0xc1, 0xf3, 0x70, 0x20, 0x24, 0x51, 0xec, 0x50, 0x69, 0x7f, + 0xa8, 0x60, 0xcf, 0x71, 0x86, 0x44, 0xe9, 0x3f, 0x21, 0x68, 0x66, 0xa9, 0xf1, 0x3b, 0x80, 0x23, + 0x6a, 0x85, 0xd4, 0xa4, 0xb6, 0x4b, 0x22, 0x6a, 0xb9, 0x81, 0xe9, 0x26, 0x64, 0x68, 0x76, 0xd4, + 0x68, 0x33, 0xcd, 0x86, 0x54, 0xac, 0x46, 0x78, 0x16, 0xda, 0xc4, 0xeb, 0xe5, 0xb1, 0x23, 0x0c, + 0xdb, 0x22, 0x5e, 0x2f, 0x8b, 0x3c, 0x0d, 0x63, 0xae, 0x45, 0xbb, 0xdb, 0x24, 0x8c, 0x3a, 0xa3, + 0xf9, 0xa3, 0x5d, 0xb3, 0x36, 0x89, 0xb3, 0xca, 0x95, 0x46, 0x8a, 0xd2, 0x7f, 0x41, 0x30, 0xb5, + 0xfc, 0x80, 0xb8, 0x81, 0x63, 0x85, 0xff, 0x49, 0x88, 0x67, 0x76, 0x84, 0x78, 0xa8, 0x2c, 0xc4, + 0x28, 0x13, 0xe3, 0x55, 0x18, 0xcf, 0x25, 0x16, 0x7f, 0x08, 0xc0, 0x3c, 0x95, 0xd5, 0x30, 0xd8, + 0x9c, 0x4b, 0xdc, 0xad, 0x33, 0xdd, 0x52, 0xe5, 0xd1, 0xd3, 0x19, 0xc5, 0xc8, 0xa0, 0xf5, 0x1f, + 0x11, 0x4c, 0x32, 0xb6, 0x75, 0x1a, 0x12, 0xcb, 0x4d, 0x39, 0xcf, 0x43, 0xa3, 0xbb, 0x1d, 0x7b, + 0x77, 0x73, 0xa4, 0xd3, 0x32, 0xb4, 0x01, 0xe5, 0x85, 0x04, 0x24, 0x78, 0xb3, 0x16, 0x85, 0xa0, + 0x46, 0x5e, 0x2a, 0xa8, 0x75, 0x38, 0x54, 0x28, 0xc2, 0x2b, 0x38, 0xe9, 0xef, 0x08, 0x30, 0x4b, + 0xe9, 0x6d, 0xcb, 0x89, 0x49, 0x24, 0x0b, 0x7b, 0x0c, 0xc0, 0x49, 0xa4, 0xa6, 0x67, 0xb9, 0x84, + 0x15, 0xb4, 0x6e, 0xd4, 0x99, 0xe4, 0xba, 0xe5, 0x92, 0x21, 0x75, 0x1f, 0x79, 0x89, 0xba, 0x8f, + 0xee, 0x59, 0xf7, 0xca, 0x71, 0xb4, 0x9f, 0xba, 0x9f, 0x85, 0xc9, 0x5c, 0xfc, 0x22, 0x27, 0x27, + 0xa0, 0xc9, 0x0f, 0x70, 0x9f, 0xc9, 0x59, 0x56, 0xea, 0x46, 0xc3, 0x19, 0x40, 0xf5, 0x4f, 0xe1, + 0x48, 0xc6, 0xb2, 0x50, 0xe9, 0x7d, 0xd8, 0xdf, 0x85, 0x89, 0x6b, 0x32, 0x23, 0xd1, 0x6b, 0xbe, + 0x11, 0xfa, 0xfb, 0xa2, 0x4c, 0xc2, 0x99, 0x88, 0x72, 0x06, 0x1a, 0x83, 0x32, 0xc9, 0x20, 0x21, + 0xad, 0x53, 0xa4, 0x7f, 0x04, 0x9d, 0x81, 0x59, 0xe1, 0x88, 0x7b, 0x1a, 0x63, 0x68, 0xdf, 0x8a, + 0x48, 0xb8, 0x4e, 0x2d, 0x2a, 0xcf, 0xa7, 0xff, 0x89, 0x60, 0x22, 0x23, 0x14, 0x54, 0x27, 0xe5, + 0x33, 0x6d, 0xfb, 0x9e, 0x19, 0x5a, 0x94, 0xb7, 0x0c, 0x32, 0xc6, 0x53, 0xa9, 0x61, 0x51, 0x92, + 0x74, 0x95, 0x17, 0xbb, 0x66, 0xda, 0xfd, 0x68, 0xb6, 0x62, 0xd4, 0xbd, 0xd8, 0xe5, 0xdd, 0x99, + 0xe4, 0xce, 0x0a, 0x6c, 0xb3, 0xc0, 0x34, 0xca, 0x98, 0xda, 0x56, 0x60, 0xaf, 0xe4, 0xc8, 0xe6, + 0x60, 0x32, 0x8c, 0x1d, 0x52, 0x84, 0x57, 0x18, 0x7c, 0x22, 0x51, 0xe5, 0xf1, 0xff, 0x87, 0x71, + 0xab, 0x4b, 0xed, 0xfb, 0x44, 0xfa, 0xaf, 0x32, 0xff, 0x4d, 0x2e, 0xe4, 0x21, 0xe8, 0x5f, 0xc1, + 0x64, 0x72, 0xba, 0x95, 0x8b, 0xf9, 0xf3, 0x4d, 0xc3, 0x81, 0x38, 0x22, 0xa1, 0x69, 0xf7, 0xc4, + 0x5d, 0xa8, 0x25, 0x9f, 0x2b, 0x3d, 0x7c, 0x0a, 0x2a, 0x3d, 0x8b, 0x5a, 0xec, 0x2c, 0x8d, 0x85, + 0x23, 0xb2, 0x59, 0x77, 0x64, 0xc8, 0x60, 0x30, 0xfd, 0x32, 0xe0, 0x44, 0x15, 0xe5, 0xd9, 0xcf, + 0x40, 0x35, 0x4a, 0x04, 0xe2, 0xea, 0x1e, 0xcd, 0xb2, 0x14, 0x22, 0x31, 
0x38, 0x52, 0xff, 0x0d, + 0x81, 0xb6, 0x4a, 0x68, 0x68, 0x77, 0xa3, 0x4b, 0x7e, 0x98, 0xbf, 0x1b, 0xaf, 0xf9, 0x6d, 0x3e, + 0x0b, 0x4d, 0x79, 0xf9, 0xcc, 0x88, 0xd0, 0xdd, 0xdf, 0xe7, 0x86, 0x84, 0xae, 0x13, 0xaa, 0x5f, + 0x85, 0x99, 0xa1, 0x31, 0x8b, 0x54, 0xcc, 0x42, 0xcd, 0x65, 0x10, 0x91, 0x8b, 0xf6, 0xe0, 0x19, + 0xe3, 0xa6, 0x86, 0xd0, 0xeb, 0x37, 0xe1, 0xe4, 0x10, 0xb2, 0x42, 0x9b, 0xef, 0x9f, 0xb2, 0x03, + 0x87, 0x05, 0xe5, 0x2a, 0xa1, 0x56, 0x52, 0x30, 0xd9, 0xf5, 0x6b, 0x30, 0xbd, 0x43, 0x23, 0xe8, + 0xdf, 0x83, 0x31, 0x57, 0xc8, 0x84, 0x83, 0x4e, 0xd1, 0x41, 0x6a, 0x93, 0x22, 0xf5, 0x7f, 0x10, + 0x1c, 0x2c, 0x8c, 0x8b, 0xa4, 0x04, 0x77, 0x42, 0xdf, 0x35, 0xe5, 0xc2, 0x33, 0xe8, 0xb6, 0x56, + 0x22, 0x5f, 0x11, 0xe2, 0x95, 0x5e, 0xb6, 0x1d, 0x47, 0x72, 0xed, 0xe8, 0x41, 0x8d, 0xdd, 0x5f, + 0x39, 0x35, 0x27, 0x07, 0xa1, 0xb0, 0x14, 0xdd, 0xb0, 0xec, 0x70, 0x69, 0x31, 0x19, 0x02, 0x7f, + 0x3c, 0x9d, 0x79, 0xa9, 0x5d, 0x89, 0xdb, 0x2f, 0xf6, 0xac, 0x80, 0x92, 0xd0, 0x10, 0x5e, 0xf0, + 0xdb, 0x50, 0xe3, 0xd3, 0xad, 0x53, 0x61, 0xfe, 0xc6, 0x65, 0x17, 0x64, 0x07, 0xa0, 0x80, 0xe8, + 0xdf, 0x23, 0xa8, 0xf2, 0x93, 0xbe, 0xae, 0xd6, 0x54, 0x61, 0x8c, 0x78, 0x5d, 0xbf, 0x67, 0x7b, + 0x5b, 0xec, 0xd9, 0xa8, 0x1a, 0xe9, 0x37, 0xc6, 0xe2, 0xa6, 0x26, 0xef, 0x43, 0x53, 0x5c, 0xc7, + 0x45, 0x18, 0xcf, 0x75, 0x4e, 0x6e, 0x35, 0x42, 0xfb, 0x5a, 0x8d, 0x4c, 0x68, 0x66, 0x35, 0xf8, + 0x24, 0x54, 0xe8, 0xc3, 0x80, 0xbf, 0x7f, 0xad, 0x85, 0x09, 0x69, 0xcd, 0xd4, 0x1b, 0x0f, 0x03, + 0x62, 0x30, 0x75, 0x12, 0x0d, 0x9b, 0xac, 0xbc, 0x7c, 0xec, 0x37, 0x9e, 0x82, 0x2a, 0x1b, 0x36, + 0x2c, 0xf4, 0xba, 0xc1, 0x3f, 0xf4, 0xef, 0x10, 0xb4, 0x06, 0x9d, 0x72, 0xc9, 0x76, 0xc8, 0xab, + 0x68, 0x14, 0x15, 0xc6, 0xee, 0xd8, 0x0e, 0x61, 0x31, 0x70, 0x77, 0xe9, 0x77, 0x59, 0xa6, 0xde, + 0xfa, 0x0c, 0xea, 0xe9, 0x11, 0x70, 0x1d, 0xaa, 0xcb, 0x37, 0x6f, 0x2d, 0x5e, 0x6b, 0x2b, 0x78, + 0x1c, 0xea, 0xd7, 0xd7, 0x36, 0x4c, 0xfe, 0x89, 0xf0, 0x41, 0x68, 0x18, 0xcb, 0x97, 0x97, 0xbf, + 0x30, 0x57, 0x17, 0x37, 0x2e, 0x5c, 0x69, 0x8f, 0x60, 0x0c, 0x2d, 0x2e, 0xb8, 0xbe, 0x26, 0x64, + 0xa3, 0x0b, 0x3f, 0x8f, 0xc1, 0x98, 0x8c, 0x11, 0x9f, 0x83, 0xca, 0x8d, 0x38, 0xda, 0xc6, 0x87, + 0x07, 0x9d, 0xfa, 0x79, 0x68, 0x53, 0x22, 0x6e, 0x9e, 0x3a, 0xbd, 0x43, 0xce, 0xef, 0x9d, 0xae, + 0xe0, 0x0f, 0xa0, 0xca, 0xf6, 0x20, 0x5c, 0xba, 0x99, 0xab, 0xe5, 0xfb, 0xb6, 0xae, 0xe0, 0x8b, + 0xd0, 0xc8, 0xec, 0x76, 0x43, 0xac, 0x8f, 0xe6, 0xa4, 0xf9, 0x27, 0x45, 0x57, 0x4e, 0x23, 0xbc, + 0x06, 0x2d, 0xa6, 0x92, 0x2b, 0x59, 0x84, 0xff, 0x27, 0x4d, 0xca, 0x56, 0x65, 0xf5, 0xd8, 0x10, + 0x6d, 0x1a, 0xd6, 0x15, 0x68, 0x64, 0xd6, 0x11, 0xac, 0xe6, 0x1a, 0x2f, 0xb7, 0x9d, 0x0d, 0x82, + 0x2b, 0xd9, 0x7c, 0x74, 0x05, 0xdf, 0x16, 0x8b, 0x49, 0x76, 0xb1, 0xd9, 0x95, 0xef, 0x44, 0x89, + 0xae, 0xe4, 0xc8, 0xcb, 0x00, 0x83, 0x65, 0x02, 0x1f, 0xc9, 0x19, 0x65, 0x97, 0x20, 0x55, 0x2d, + 0x53, 0xa5, 0xe1, 0xad, 0x43, 0xbb, 0xb8, 0x93, 0xec, 0x46, 0x76, 0x7c, 0xa7, 0xaa, 0x24, 0xb6, + 0x25, 0xa8, 0xa7, 0x43, 0x17, 0x77, 0x4a, 0xe6, 0x30, 0x27, 0x1b, 0x3e, 0xa1, 0x75, 0x05, 0x5f, + 0x82, 0xe6, 0xa2, 0xe3, 0xec, 0x87, 0x46, 0xcd, 0x6a, 0xa2, 0x22, 0x8f, 0x93, 0x4e, 0x8b, 0xe2, + 0x68, 0xc2, 0x6f, 0xa4, 0x0f, 0xc2, 0xae, 0xc3, 0x5b, 0x7d, 0x73, 0x4f, 0x5c, 0xea, 0xed, 0x1b, + 0x38, 0xb6, 0xeb, 0x20, 0xdc, 0xb7, 0xcf, 0x53, 0x7b, 0xe0, 0x4a, 0xb2, 0xbe, 0x01, 0x07, 0x0b, + 0x73, 0x11, 0x6b, 0x05, 0x96, 0xc2, 0x28, 0x55, 0x67, 0x86, 0xea, 0x25, 0xef, 0xd2, 0xc7, 0x8f, + 0x9f, 0x69, 0xca, 0x93, 0x67, 0x9a, 0xf2, 0xe2, 0x99, 0x86, 0xbe, 0xed, 0x6b, 0xe8, 0xd7, 0xbe, + 
0x86, 0x1e, 0xf5, 0x35, 0xf4, 0xb8, 0xaf, 0xa1, 0xbf, 0xfa, 0x1a, 0xfa, 0xbb, 0xaf, 0x29, 0x2f, + 0xfa, 0x1a, 0xfa, 0xe1, 0xb9, 0xa6, 0x3c, 0x7e, 0xae, 0x29, 0x4f, 0x9e, 0x6b, 0xca, 0x97, 0xb5, + 0xae, 0x63, 0x13, 0x8f, 0x6e, 0xd6, 0xd8, 0xdf, 0xf9, 0x77, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, + 0x0a, 0x84, 0x23, 0x5a, 0x39, 0x10, 0x00, 0x00, } func (x MatchType) String() string { @@ -1986,6 +1995,9 @@ func (this *UserStatsResponse) Equal(that interface{}) bool { if this.RuleIngestionRate != that1.RuleIngestionRate { return false } + if this.ActiveSeries != that1.ActiveSeries { + return false + } return true } func (this *UserIDStatsResponse) Equal(that interface{}) bool { @@ -2541,12 +2553,13 @@ func (this *UserStatsResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&client.UserStatsResponse{") s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") + s = append(s, "ActiveSeries: "+fmt.Sprintf("%#v", this.ActiveSeries)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3882,6 +3895,11 @@ func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ActiveSeries != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.ActiveSeries)) + i-- + dAtA[i] = 0x28 + } if m.RuleIngestionRate != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) @@ -4667,6 +4685,9 @@ func (m *UserStatsResponse) Size() (n int) { if m.RuleIngestionRate != 0 { n += 9 } + if m.ActiveSeries != 0 { + n += 1 + sovIngester(uint64(m.ActiveSeries)) + } return n } @@ -5091,6 +5112,7 @@ func (this *UserStatsResponse) String() string { `NumSeries:` + fmt.Sprintf("%v", this.NumSeries) + `,`, `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, + `ActiveSeries:` + fmt.Sprintf("%v", this.ActiveSeries) + `,`, `}`, }, "") return s @@ -6714,6 +6736,25 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.RuleIngestionRate = float64(math.Float64frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveSeries", wireType) + } + m.ActiveSeries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveSeries |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto index 982fed7e1..131c07e09 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto @@ -5,7 +5,7 @@ package cortex; option go_package = "client"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; option (gogoproto.marshaler_all) = true; @@ -97,6 
+97,7 @@ message UserStatsResponse { uint64 num_series = 2; double api_ingestion_rate = 3; double rule_ingestion_rate = 4; + uint64 active_series = 5; } message UserIDStatsResponse { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go index f23f08b81..81615afb4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go @@ -24,6 +24,7 @@ const pageContent = `

 <h1>Ring Status</h1>
 <p>Current time: {{ .Now }}</p>
+<p>Storage updated: {{ .StorageLastUpdated }}</p>
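The hunks that follow thread the new StorageLastUpdated value through the ring page's httpResponse and use it, rather than time.Now(), as the reference time for instance health. For illustration only, a caller could flag a stale ring with a helper like the sketch below; the helper name and threshold are assumptions, not part of this patch. Note that of the backends touched here, only the DynamoDB client reports a real sync time; the Consul, etcd, memberlist and mock clients all return time.Now().UTC() from LastUpdateTime.

	package example

	import (
		"time"

		"github.com/cortexproject/cortex/pkg/ring/kv"
	)

	// ringStorageIsStale reports whether the ring data last synced from the
	// KV backend is older than maxAge. With backends that return
	// time.Now().UTC() this is always false, preserving the old behaviour.
	func ringStorageIsStale(client kv.Client, ringKey string, maxAge time.Duration) bool {
		return time.Since(client.LastUpdateTime(ringKey)) > maxAge
	}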
@@ -116,9 +117,10 @@ type ingesterDesc struct { } type httpResponse struct { - Ingesters []ingesterDesc `json:"shards"` - Now time.Time `json:"now"` - ShowTokens bool `json:"-"` + Ingesters []ingesterDesc `json:"shards"` + Now time.Time `json:"now"` + StorageLastUpdated time.Time `json:"storageLastUpdated"` + ShowTokens bool `json:"-"` } func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { @@ -149,14 +151,14 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(ingesterIDs) - now := time.Now() + storageLastUpdate := r.KVClient.LastUpdateTime(r.key) var ingesters []ingesterDesc _, owned := r.countTokens() for _, id := range ingesterIDs { ing := r.ringDesc.Ingesters[id] heartbeatTimestamp := time.Unix(ing.Timestamp, 0) state := ing.State.String() - if !r.IsHealthy(&ing, Reporting, now) { + if !r.IsHealthy(&ing, Reporting, storageLastUpdate) { state = unhealthy } @@ -182,9 +184,10 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { tokensParam := req.URL.Query().Get("tokens") renderHTTPResponse(w, httpResponse{ - Ingesters: ingesters, - Now: now, - ShowTokens: tokensParam == "true", + Ingesters: ingesters, + Now: time.Now(), + StorageLastUpdated: storageLastUpdate, + ShowTokens: tokensParam == "true", }, pageTemplate, req) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go index b251a64b8..e0ce28d0b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go @@ -5,12 +5,14 @@ import ( "flag" "fmt" "sync" + "time" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/consul" + "github.com/cortexproject/cortex/pkg/ring/kv/dynamodb" "github.com/cortexproject/cortex/pkg/ring/kv/etcd" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" ) @@ -40,9 +42,10 @@ var inmemoryStore Client // Consul, Etcd, Memberlist or MultiClient. It was extracted from Config to keep // single-client config separate from final client-config (with all the wrappers) type StoreConfig struct { - Consul consul.Config `yaml:"consul"` - Etcd etcd.Config `yaml:"etcd"` - Multi MultiConfig `yaml:"multi"` + DynamoDB dynamodb.Config `yaml:"dynamodb"` + Consul consul.Config `yaml:"consul"` + Etcd etcd.Config `yaml:"etcd"` + Multi MultiConfig `yaml:"multi"` // Function that returns memberlist.KV store to use. By using a function, we can delay // initialization of memberlist.KV until it is actually required. @@ -69,6 +72,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(flagsPrefix, defaultPrefix string, f // This needs to be fixed in the future (1.0 release maybe?) when we normalize flags. // At the moment we have consul., and ring.store, going forward it would // be easier to have everything under ring, so ring.consul. + cfg.DynamoDB.RegisterFlags(f, flagsPrefix) cfg.Consul.RegisterFlags(f, flagsPrefix) cfg.Etcd.RegisterFlagsWithPrefix(f, flagsPrefix) cfg.Multi.RegisterFlagsWithPrefix(f, flagsPrefix) @@ -111,6 +115,9 @@ type Client interface { // WatchPrefix calls f whenever any value stored under prefix changes. 
 	WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool)
+
+	// LastUpdateTime returns the time a key was last synced by the kv store
+	LastUpdateTime(key string) time.Time
 }
 
 // NewClient creates a new Client (consul, etcd or inmemory) based on the config,
@@ -128,6 +135,9 @@ func createClient(backend string, prefix string, cfg StoreConfig, codec codec.Co
 	var err error
 	switch backend {
+	case "dynamodb":
+		client, err = dynamodb.NewClient(cfg.DynamoDB, codec, logger, reg)
+
 	case "consul":
 		client, err = consul.NewClient(cfg.Consul, codec, logger, reg)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go
new file mode 100644
index 000000000..c3df74c62
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go
@@ -0,0 +1,6 @@
+package codec
+
+type Clonable interface {
+	// Clone should return a deep copy of the state.
+	Clone() interface{}
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go
index 49540fe3a..d701bbe20 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go
@@ -1,8 +1,11 @@
 package codec
 
 import (
+	"fmt"
+
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/snappy"
+	"github.com/pkg/errors"
 )
 
 // Codec allows KV clients to serialise and deserialise values.
@@ -10,6 +13,9 @@ type Codec interface {
 	Decode([]byte) (interface{}, error)
 	Encode(interface{}) ([]byte, error)
 
+	DecodeMultiKey(map[string][]byte) (interface{}, error)
+	EncodeMultiKey(interface{}) (map[string][]byte, error)
+
 	// CodecID is a short identifier to communicate what codec should be used to decode the value.
 	// Once in use, this should be stable to avoid confusing other clients.
 	CodecID() string
@@ -31,7 +37,34 @@ func (p Proto) CodecID() string {
 
 // Decode implements Codec
 func (p Proto) Decode(bytes []byte) (interface{}, error) {
-	out := p.factory()
+	return p.decode(bytes, p.factory())
+}
+
+// DecodeMultiKey implements Codec
+func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) {
+	msg := p.factory()
+	// Don't even try
+	out, ok := msg.(MultiKey)
+	if !ok || out == nil {
+		return nil, fmt.Errorf("invalid type: %T, expected MultiKey", out)
+	}
+
+	if len(data) > 0 {
+		res := make(map[string]interface{}, len(data))
+		for key, bytes := range data {
+			decoded, err := p.decode(bytes, out.GetItemFactory())
+			if err != nil {
+				return nil, err
+			}
+			res[key] = decoded
+		}
+		out.JoinIds(res)
+	}
+
+	return out, nil
+}
+
+func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) {
 	bytes, err := snappy.Decode(nil, bytes)
 	if err != nil {
 		return nil, err
@@ -51,6 +84,26 @@ func (p Proto) Encode(msg interface{}) ([]byte, error) {
 	return snappy.Encode(nil, bytes), nil
 }
 
+// EncodeMultiKey implements Codec
+func (p Proto) EncodeMultiKey(msg interface{}) (map[string][]byte, error) {
+	// Don't even try
+	r, ok := msg.(MultiKey)
+	if !ok || r == nil {
+		return nil, fmt.Errorf("invalid type: %T, expected MultiKey", msg)
+	}
+
+	objs := r.SplitByID()
+	res := make(map[string][]byte, len(objs))
+	for key, value := range objs {
+		bytes, err := p.Encode(value)
+		if err != nil {
+			return nil, err
+		}
+		res[key] = bytes
+	}
+	return res, nil
+}
+
 // String is a codec for strings.
 type String struct{}
 
@@ -67,3 +120,11 @@ func (String) Decode(bytes []byte) (interface{}, error) {
 func (String) Encode(msg interface{}) ([]byte, error) {
 	return []byte(msg.(string)), nil
 }
+
+func (String) EncodeMultiKey(msg interface{}) (map[string][]byte, error) {
+	return nil, errors.New("String codec does not support EncodeMultiKey")
+}
+
+func (String) DecodeMultiKey(map[string][]byte) (interface{}, error) {
+	return nil, errors.New("String codec does not support DecodeMultiKey")
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go
new file mode 100644
index 000000000..bd8802c4a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go
@@ -0,0 +1,25 @@
+package codec
+
+import (
+	"github.com/gogo/protobuf/proto"
+)
+
+type MultiKey interface {
+	Clonable
+
+	// SplitByID splits the interface into a map of key to value. The key is a unique identifier of an instance in the ring. The value is
+	// an interface with that instance's data. The resulting value needs to be a proto.Message.
+	SplitByID() map[string]interface{}
+
+	// JoinIds updates the current interface with the received key/value information. The key is a unique identifier for an instance.
+	// The value is the information for that instance.
+	JoinIds(in map[string]interface{})
+
+	// GetItemFactory returns the proto.Message used to deserialize the value information for an instance.
+	GetItemFactory() proto.Message
+
+	// FindDifference returns the difference between two MultiKeys. It returns an interface which also implements MultiKey
+	// containing the keys which were changed, plus a slice of unique identifiers that were deleted. An error is
+	// returned when the argument does not implement the correct codec.
+	FindDifference(that MultiKey) (interface{}, []string, error)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
index 8d94eae00..ab1c9da22 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
@@ -369,6 +369,10 @@ func (c *Client) Delete(ctx context.Context, key string) error {
 	return err
 }
 
+func (c *Client) LastUpdateTime(_ string) time.Time {
+	return time.Now().UTC()
+}
+
 func checkLastIndex(index, metaLastIndex uint64) (newIndex uint64, skip bool) {
 	// See https://www.consul.io/api/features/blocking.html#implementation-details for logic behind these checks.
 	if metaLastIndex == 0 {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go
index 6de12058a..7828499de 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go
@@ -233,6 +233,10 @@ func (m *mockKV) Delete(key string, q *consul.WriteOptions) (*consul.WriteMeta,
 	return nil, nil
 }
 
+func (m *mockKV) LastUpdateTime(_ string) time.Time {
+	return time.Now().UTC()
+}
+
 func (m *mockKV) ResetIndex() {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go
new file mode 100644
index 000000000..34797b151
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go
@@ -0,0 +1,325 @@
+package dynamodb
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
+	"github.com/cortexproject/cortex/pkg/util/backoff"
+)
+
+// Config to create a DynamoDB client
+type Config struct {
+	Region    string        `yaml:"region"`
+	TableName string        `yaml:"table_name"`
+	TTL       time.Duration `yaml:"ttl"`
+}
+
+type Client struct {
+	kv         dynamoDbClient
+	codec      codec.Codec
+	ddbMetrics *dynamodbMetrics
+	logger     log.Logger
+
+	staleDataLock sync.RWMutex
+	staleData     map[string]staleData
+}
+
+type staleData struct {
+	data      codec.MultiKey
+	timestamp time.Time
+}
+
+var (
+	backoffConfig = backoff.Config{
+		MinBackoff: 1 * time.Second,
+		MaxBackoff: 1 * time.Minute,
+		MaxRetries: 0,
+	}
+
+	defaultLoopDelay = 1 * time.Minute
+)
+
+// RegisterFlags adds the flags required to configure this to the given FlagSet.
+// If prefix is not an empty string it should end with a period.
+func (cfg *Config) RegisterFlags(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Region, prefix+"dynamodb.region", "", "Region to access dynamodb.") + f.StringVar(&cfg.TableName, prefix+"dynamodb.table-name", "", "Table name to use on dynamodb.") + f.DurationVar(&cfg.TTL, prefix+"dynamodb.ttl-time", 0, "Time to expire items on dynamodb.") +} + +func NewClient(cfg Config, cc codec.Codec, logger log.Logger, registerer prometheus.Registerer) (*Client, error) { + dynamoDB, err := newDynamodbKV(cfg, logger) + if err != nil { + return nil, err + } + + ddbMetrics := newDynamoDbMetrics(registerer) + + c := &Client{ + kv: dynamodbInstrumentation{kv: dynamoDB, ddbMetrics: ddbMetrics}, + codec: cc, + logger: ddbLog(logger), + ddbMetrics: ddbMetrics, + staleData: make(map[string]staleData), + } + level.Info(c.logger).Log("dynamodb kv initialized") + return c, nil +} + +func (c *Client) List(ctx context.Context, key string) ([]string, error) { + resp, _, err := c.kv.List(ctx, dynamodbKey{primaryKey: key}) + if err != nil { + level.Warn(c.logger).Log("msg", "error List", "key", key, "err", err) + return nil, err + } + return resp, err +} + +func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { + resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) + if err != nil { + level.Warn(c.logger).Log("msg", "error Get", "key", key, "err", err) + return nil, err + } + res, err := c.decodeMultikey(resp) + if err != nil { + return nil, err + } + c.updateStaleData(key, res, time.Now().UTC()) + + return res, nil +} + +func (c *Client) Delete(ctx context.Context, key string) error { + resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) + if err != nil { + level.Warn(c.logger).Log("msg", "error Delete", "key", key, "err", err) + return err + } + + for innerKey := range resp { + err := c.kv.Delete(ctx, dynamodbKey{ + primaryKey: key, + sortKey: innerKey, + }) + if err != nil { + level.Warn(c.logger).Log("msg", "error Delete", "key", key, "innerKey", innerKey, "err", err) + return err + } + } + c.deleteStaleData(key) + + return err +} + +func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { + bo := backoff.New(ctx, backoffConfig) + for bo.Ongoing() { + resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) + if err != nil { + level.Error(c.logger).Log("msg", "error cas query", "key", key, "err", err) + bo.Wait() + continue + } + + current, err := c.decodeMultikey(resp) + if err != nil { + level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) + continue + } + + out, retry, err := f(current.Clone()) + if err != nil { + if !retry { + return err + } + bo.Wait() + continue + } + + // Callback returning nil means it doesn't want to CAS anymore. 
+ if out == nil { + return nil + } + // Don't even try + + r, ok := out.(codec.MultiKey) + if !ok || r == nil { + return fmt.Errorf("invalid type: %T, expected MultiKey", out) + } + + toUpdate, toDelete, err := current.FindDifference(r) + + if err != nil { + level.Error(c.logger).Log("msg", "error getting key", "key", key, "err", err) + continue + } + + buf, err := c.codec.EncodeMultiKey(toUpdate) + if err != nil { + level.Error(c.logger).Log("msg", "error serialising value", "key", key, "err", err) + continue + } + + putRequests := map[dynamodbKey][]byte{} + for childKey, bytes := range buf { + putRequests[dynamodbKey{primaryKey: key, sortKey: childKey}] = bytes + } + + deleteRequests := make([]dynamodbKey, 0, len(toDelete)) + for _, childKey := range toDelete { + deleteRequests = append(deleteRequests, dynamodbKey{primaryKey: key, sortKey: childKey}) + } + + if len(putRequests) > 0 || len(deleteRequests) > 0 { + return c.kv.Batch(ctx, putRequests, deleteRequests) + } + + return nil + } + + return fmt.Errorf("failed to CAS %s", key) +} + +func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { + bo := backoff.New(ctx, backoffConfig) + + for bo.Ongoing() { + out, _, err := c.kv.Query(ctx, dynamodbKey{ + primaryKey: key, + }, false) + if err != nil { + level.Error(c.logger).Log("msg", "error WatchKey", "key", key, "err", err) + + if bo.NumRetries() > 10 { + if staleData := c.getStaleData(key); staleData != nil { + if !f(staleData) { + return + } + bo.Reset() + } + } + bo.Wait() + continue + } + + decoded, err := c.decodeMultikey(out) + if err != nil { + level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) + continue + } + c.updateStaleData(key, decoded, time.Now().UTC()) + + if !f(decoded) { + return + } + + bo.Reset() + select { + case <-ctx.Done(): + return + case <-time.After(defaultLoopDelay): + } + } +} + +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { + bo := backoff.New(ctx, backoffConfig) + + for bo.Ongoing() { + out, _, err := c.kv.Query(ctx, dynamodbKey{ + primaryKey: prefix, + }, true) + if err != nil { + level.Error(c.logger).Log("msg", "WatchPrefix", "prefix", prefix, "err", err) + bo.Wait() + continue + } + + for key, bytes := range out { + decoded, err := c.codec.Decode(bytes) + if err != nil { + level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) + continue + } + if !f(key, decoded) { + return + } + } + + bo.Reset() + select { + case <-ctx.Done(): + return + case <-time.After(defaultLoopDelay): + } + } +} + +func (c *Client) decodeMultikey(data map[string][]byte) (codec.MultiKey, error) { + res, err := c.codec.DecodeMultiKey(data) + if err != nil { + return nil, err + } + out, ok := res.(codec.MultiKey) + if !ok || out == nil { + return nil, fmt.Errorf("invalid type: %T, expected MultiKey", out) + } + + return out, nil +} + +func (c *Client) LastUpdateTime(key string) time.Time { + c.staleDataLock.RLock() + defer c.staleDataLock.RUnlock() + + data, ok := c.staleData[key] + if !ok { + return time.Time{} + } + + return data.timestamp +} + +func (c *Client) updateStaleData(key string, data codec.MultiKey, timestamp time.Time) { + c.staleDataLock.Lock() + defer c.staleDataLock.Unlock() + + c.staleData[key] = staleData{ + data: data, + timestamp: timestamp, + } +} + +func (c *Client) getStaleData(key string) codec.MultiKey { + c.staleDataLock.RLock() + defer c.staleDataLock.RUnlock() + + data, ok := c.staleData[key] + if !ok { + return nil + } 
+
+	newD := data.data.Clone().(codec.MultiKey)
+
+	return newD
+}
+
+func (c *Client) deleteStaleData(key string) {
+	c.staleDataLock.Lock()
+	defer c.staleDataLock.Unlock()
+
+	delete(c.staleData, key)
+}
+
+func ddbLog(logger log.Logger) log.Logger {
+	return log.WithPrefix(logger, "class", "DynamodbKvClient")
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go
new file mode 100644
index 000000000..6d2e3d65c
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go
@@ -0,0 +1,268 @@
+package dynamodb
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+	"github.com/go-kit/log"
+)
+
+const (
+	// DdbBatchSizeLimit is the current limit of 25 actions per batch request:
+	// https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html
+	DdbBatchSizeLimit = 25
+)
+
+type dynamodbKey struct {
+	primaryKey string
+	sortKey    string
+}
+
+type dynamoDbClient interface {
+	List(ctx context.Context, key dynamodbKey) ([]string, float64, error)
+	Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error)
+	Delete(ctx context.Context, key dynamodbKey) error
+	Put(ctx context.Context, key dynamodbKey, data []byte) error
+	Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error
+}
+
+type dynamodbKV struct {
+	ddbClient dynamodbiface.DynamoDBAPI
+	logger    log.Logger
+	tableName *string
+	ttlValue  time.Duration
+}
+
+var (
+	primaryKey  = "RingKey"
+	sortKey     = "InstanceKey"
+	contentData = "Data"
+	timeToLive  = "ttl"
+)
+
+func newDynamodbKV(cfg Config, logger log.Logger) (dynamodbKV, error) {
+	if err := validateConfigInput(cfg); err != nil {
+		return dynamodbKV{}, err
+	}
+
+	sess, err := session.NewSession()
+	if err != nil {
+		return dynamodbKV{}, err
+	}
+
+	if len(cfg.Region) > 0 {
+		sess.Config = &aws.Config{
+			Region: aws.String(cfg.Region),
+		}
+	}
+
+	dynamoDB := dynamodb.New(sess)
+
+	ddbKV := &dynamodbKV{
+		ddbClient: dynamoDB,
+		logger:    logger,
+		tableName: aws.String(cfg.TableName),
+		ttlValue:  cfg.TTL,
+	}
+
+	return *ddbKV, nil
+}
+
+func validateConfigInput(cfg Config) error {
+	if len(cfg.TableName) < 3 {
+		return fmt.Errorf("invalid dynamodb table name: %s", cfg.TableName)
+	}
+
+	return nil
+}
+
+// for testing
+func (kv dynamodbKV) getTTL() time.Duration {
+	return kv.ttlValue
+}
+
+func (kv dynamodbKV) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) {
+	var keys []string
+	var totalCapacity float64
+	input := &dynamodb.QueryInput{
+		TableName:              kv.tableName,
+		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
+		KeyConditions: map[string]*dynamodb.Condition{
+			primaryKey: {
+				ComparisonOperator: aws.String("EQ"),
+				AttributeValueList: []*dynamodb.AttributeValue{
+					{
+						S: aws.String(key.primaryKey),
+					},
+				},
+			},
+		},
+		AttributesToGet: []*string{aws.String(sortKey)},
+	}
+
+	err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool {
+		totalCapacity += getCapacityUnits(output.ConsumedCapacity)
+		for _, item := range output.Items {
+			// Take the sort key's string value; AttributeValue.String() would
+			// render the whole struct rather than the key itself.
+			keys = append(keys, *item[sortKey].S)
+		}
+		return true
+	})
+	if err != nil {
+		return nil, totalCapacity, err
+	}
+
+	return keys, totalCapacity, nil
+}
+
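// Illustrative sketch, not part of the vendored file: this client assumes a
// table with partition key "RingKey" (one partition per ring key), sort key
// "InstanceKey" (one item per instance), the snappy-compressed protobuf in
// "Data", and an optional numeric "ttl" attribute. Using the aws-sdk-go
// packages imported above, a compatible table could be provisioned like
// this (the table name is an assumption):
func createRingTable(ddb dynamodbiface.DynamoDBAPI, tableName string) error {
	_, err := ddb.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String(tableName),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("RingKey"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("InstanceKey"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("RingKey"), KeyType: aws.String("HASH")},
			{AttributeName: aws.String("InstanceKey"), KeyType: aws.String("RANGE")},
		},
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
	})
	// Expiry on the "ttl" attribute would additionally need to be enabled via
	// the separate UpdateTimeToLive API.
	return err
}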
+func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) {
+	keys := make(map[string][]byte)
+	var totalCapacity float64
+	co := dynamodb.ComparisonOperatorEq
+	if isPrefix {
+		co = dynamodb.ComparisonOperatorBeginsWith
+	}
+	input := &dynamodb.QueryInput{
+		TableName:              kv.tableName,
+		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
+		KeyConditions: map[string]*dynamodb.Condition{
+			primaryKey: {
+				ComparisonOperator: aws.String(co),
+				AttributeValueList: []*dynamodb.AttributeValue{
+					{
+						S: aws.String(key.primaryKey),
+					},
+				},
+			},
+		},
+	}
+
+	err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool {
+		totalCapacity += getCapacityUnits(output.ConsumedCapacity)
+		for _, item := range output.Items {
+			keys[*item[sortKey].S] = item[contentData].B
+		}
+		return true
+	})
+	if err != nil {
+		return nil, totalCapacity, err
+	}
+
+	return keys, totalCapacity, nil
+}
+
+func (kv dynamodbKV) Delete(ctx context.Context, key dynamodbKey) error {
+	input := &dynamodb.DeleteItemInput{
+		TableName: kv.tableName,
+		Key:       generateItemKey(key),
+	}
+	_, err := kv.ddbClient.DeleteItemWithContext(ctx, input)
+	return err
+}
+
+func (kv dynamodbKV) Put(ctx context.Context, key dynamodbKey, data []byte) error {
+	input := &dynamodb.PutItemInput{
+		TableName: kv.tableName,
+		Item:      kv.generatePutItemRequest(key, data),
+	}
+	_, err := kv.ddbClient.PutItemWithContext(ctx, input)
+	return err
+}
+
+func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error {
+	writeRequestSize := len(put) + len(delete)
+	if writeRequestSize == 0 {
+		return nil
+	}
+
+	writeRequestsSlices := make([][]*dynamodb.WriteRequest, int(math.Ceil(float64(writeRequestSize)/float64(DdbBatchSizeLimit))))
+	for i := 0; i < len(writeRequestsSlices); i++ {
+		writeRequestsSlices[i] = make([]*dynamodb.WriteRequest, 0, DdbBatchSizeLimit)
+	}
+
+	currIdx := 0
+	for key, data := range put {
+		item := kv.generatePutItemRequest(key, data)
+		writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], &dynamodb.WriteRequest{
+			PutRequest: &dynamodb.PutRequest{
+				Item: item,
+			},
+		})
+		if len(writeRequestsSlices[currIdx]) == DdbBatchSizeLimit {
+			currIdx++
+		}
+	}
+
+	for _, key := range delete {
+		item := generateItemKey(key)
+		writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], &dynamodb.WriteRequest{
+			DeleteRequest: &dynamodb.DeleteRequest{
+				Key: item,
+			},
+		})
+		if len(writeRequestsSlices[currIdx]) == DdbBatchSizeLimit {
+			currIdx++
+		}
+	}
+
+	for _, slice := range writeRequestsSlices {
+		input := &dynamodb.BatchWriteItemInput{
+			RequestItems: map[string][]*dynamodb.WriteRequest{
+				*kv.tableName: slice,
+			},
+		}
+
+		resp, err := kv.ddbClient.BatchWriteItemWithContext(ctx, input)
+		if err != nil {
+			return err
+		}
+
+		if len(resp.UnprocessedItems) > 0 {
+			return fmt.Errorf("error processing batch request: unprocessed items remain: %v", resp.UnprocessedItems)
+		}
+	}
+
+	return nil
+}
+
+func (kv dynamodbKV) generatePutItemRequest(key dynamodbKey, data []byte) map[string]*dynamodb.AttributeValue {
+	item := generateItemKey(key)
+	item[contentData] = &dynamodb.AttributeValue{
+		B: data,
+	}
+	if kv.getTTL() > 0 {
+		item[timeToLive] = &dynamodb.AttributeValue{
+			N: aws.String(strconv.FormatInt(time.Now().UTC().Add(kv.getTTL()).Unix(), 10)),
+		}
+	}
+
+	return item
+}
+
+func generateItemKey(key dynamodbKey)
map[string]*dynamodb.AttributeValue { + resp := map[string]*dynamodb.AttributeValue{ + primaryKey: { + S: aws.String(key.primaryKey), + }, + } + if len(key.sortKey) > 0 { + resp[sortKey] = &dynamodb.AttributeValue{ + S: aws.String(key.sortKey), + } + } + + return resp +} + +func getCapacityUnits(cap *dynamodb.ConsumedCapacity) float64 { + if cap != nil && cap.CapacityUnits != nil { + return *cap.CapacityUnits + } + return 0 +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go new file mode 100644 index 000000000..d47f2fe39 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go @@ -0,0 +1,99 @@ +package dynamodb + +import ( + "context" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + grpcUtils "github.com/weaveworks/common/grpc" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/instrument" +) + +type dynamodbInstrumentation struct { + kv dynamodbKV + ddbMetrics *dynamodbMetrics +} + +type dynamodbMetrics struct { + dynamodbRequestDuration *instrument.HistogramCollector + dynamodbUsageMetrics *prometheus.CounterVec +} + +func newDynamoDbMetrics(registerer prometheus.Registerer) *dynamodbMetrics { + dynamodbRequestDurationCollector := instrument.NewHistogramCollector(promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{ + Name: "dynamodb_kv_request_duration_seconds", + Help: "Time spent on dynamodb requests.", + Buckets: prometheus.DefBuckets, + }, []string{"operation", "status_code"})) + + dynamodbUsageMetrics := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Name: "dynamodb_kv_read_capacity_total", + Help: "Total used read capacity on dynamodb", + }, []string{"operation"}) + + dynamodbMetrics := dynamodbMetrics{ + dynamodbRequestDuration: dynamodbRequestDurationCollector, + dynamodbUsageMetrics: dynamodbUsageMetrics, + } + return &dynamodbMetrics +} + +func (d dynamodbInstrumentation) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) { + var resp []string + var totalCapacity float64 + err := instrument.CollectedRequest(ctx, "List", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + var err error + resp, totalCapacity, err = d.kv.List(ctx, key) + return err + }) + d.ddbMetrics.dynamodbUsageMetrics.WithLabelValues("List").Add(totalCapacity) + return resp, totalCapacity, err +} + +func (d dynamodbInstrumentation) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { + var resp map[string][]byte + var totalCapacity float64 + err := instrument.CollectedRequest(ctx, "Query", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + var err error + resp, totalCapacity, err = d.kv.Query(ctx, key, isPrefix) + return err + }) + d.ddbMetrics.dynamodbUsageMetrics.WithLabelValues("Query").Add(totalCapacity) + return resp, totalCapacity, err +} + +func (d dynamodbInstrumentation) Delete(ctx context.Context, key dynamodbKey) error { + return instrument.CollectedRequest(ctx, "Delete", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + return d.kv.Delete(ctx, key) + }) +} + +func (d dynamodbInstrumentation) Put(ctx context.Context, key dynamodbKey, data []byte) error { + return instrument.CollectedRequest(ctx, "Put", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx 
context.Context) error { + return d.kv.Put(ctx, key, data) + }) +} + +func (d dynamodbInstrumentation) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { + return instrument.CollectedRequest(ctx, "Batch", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + return d.kv.Batch(ctx, put, delete) + }) +} + +// errorCode converts an error into an error code string. +func errorCode(err error) string { + if err == nil { + return "2xx" + } + + if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { + statusFamily := int(errResp.Code / 100) + return strconv.Itoa(statusFamily) + "xx" + } else if grpcUtils.IsCanceled(err) { + return "cancel" + } else { + return "error" + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go index 3cea7a0cc..591845fe8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go @@ -298,3 +298,7 @@ func (c *Client) Delete(ctx context.Context, key string) error { _, err := c.cli.Delete(ctx, key) return err } + +func (c *Client) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.pb.go index 4c2eb9265..29030cfe9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.pb.go @@ -139,22 +139,21 @@ func init() { func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } var fileDescriptor_2216fe83c9c12408 = []byte{ - // 236 bytes of a gzipped FileDescriptorProto + // 218 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xc8, 0x2e, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xca, 0x4d, 0xcd, 0x4d, 0x4a, 0x2d, 0xca, 0xc9, 0x2c, 0x2e, - 0x91, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, - 0xcf, 0xd7, 0x07, 0x2b, 0x49, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x55, 0xc9, - 0x9e, 0x8b, 0xd7, 0x3b, 0xb5, 0x32, 0x2c, 0x31, 0xa7, 0x34, 0x35, 0xb8, 0x24, 0xbf, 0x28, 0x55, - 0x48, 0x8f, 0x8b, 0xb5, 0x20, 0x31, 0xb3, 0xa8, 0x58, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, - 0x42, 0x0f, 0x61, 0xb6, 0x1e, 0x4c, 0x65, 0x40, 0x62, 0x66, 0x51, 0x10, 0x44, 0x99, 0x92, 0x0f, - 0x17, 0x0f, 0xb2, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, - 0x67, 0x10, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x06, 0x92, 0x96, 0x60, 0x52, 0x60, 0xd4, 0xe0, - 0x09, 0x82, 0x70, 0x40, 0xa2, 0xc9, 0xf9, 0x29, 0xa9, 0xc9, 0x12, 0xcc, 0x60, 0x95, 0x10, 0x8e, - 0x93, 0xc9, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0, - 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, - 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, - 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0xb0, 0x5f, 0x8c, 0x01, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x7a, 0x22, 0xdf, 0xec, 0x12, 0x01, 0x00, 0x00, + 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x0b, 0xeb, 0x83, 0x58, 0x10, 0x15, 0x4a, 0xf6, 0x5c, + 0xbc, 0xde, 0xa9, 0x95, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0xc1, 0x25, 0xf9, 0x45, 0xa9, 0x42, 0x7a, + 0x5c, 0xac, 0x05, 0x89, 0x99, 
0x45, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x12, 0x7a, + 0x08, 0x23, 0xf4, 0x60, 0x2a, 0x03, 0x12, 0x33, 0x8b, 0x82, 0x20, 0xca, 0x94, 0x7c, 0xb8, 0x78, + 0x90, 0x85, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, + 0x40, 0x4c, 0x21, 0x11, 0x2e, 0xd6, 0x32, 0x90, 0xb4, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x4f, 0x10, + 0x84, 0x03, 0x12, 0x4d, 0xce, 0x4f, 0x49, 0x4d, 0x96, 0x60, 0x06, 0xab, 0x84, 0x70, 0x9c, 0x4c, + 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, + 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, + 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, + 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xfd, 0x62, 0x0c, 0x08, 0x00, 0x00, + 0xff, 0xff, 0xbe, 0xcc, 0xb6, 0xd8, 0xf9, 0x00, 0x00, 0x00, } func (this *KeyValueStore) Equal(that interface{}) bool { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.proto index cc5f12463..700b2f530 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package memberlist; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go index db38617ab..6b1e1744d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go @@ -105,6 +105,10 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, c.kv.WatchPrefix(ctx, prefix, c.codec, f) } +func (c *Client) LastUpdateTime(_ string) time.Time { + return time.Now().UTC() +} + // We want to use KV in Running and Stopping states. func (c *Client) awaitKVRunningOrStopping(ctx context.Context) error { s := c.kv.State() @@ -314,7 +318,7 @@ type valueDesc struct { func (v valueDesc) Clone() (result valueDesc) { result = v if v.value != nil { - result.value = v.value.Clone() + result.value = v.value.Clone().(Mergeable) } return } @@ -1253,7 +1257,7 @@ func (m *KV) mergeValueForKey(key string, incomingValue Mergeable, casVersion ui // The "changes" returned by Merge() can contain references to the "result" // state. Therefore, make sure we clone it before releasing the lock. - change = change.Clone() + change = change.Clone().(Mergeable) return change, newVersion, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/mergeable.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/mergeable.go index 2c02acfa4..f2120c1d9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/mergeable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/mergeable.go @@ -1,10 +1,16 @@ package memberlist -import "time" +import ( + "time" + + "github.com/cortexproject/cortex/pkg/ring/kv/codec" +) // Mergeable is an interface that values used in gossiping KV Client must implement. // It allows merging of different states, obtained via gossiping or CAS function. 
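// An illustrative sketch (not part of this patch) of what the Clonable split
// below implies for callers: Clone() now comes from codec.Clonable and returns
// interface{} rather than Mergeable, which is why memberlist_client.go above
// asserts the result back with Clone().(Mergeable). Hypothetical helper:
//
//	func cloneMergeable(m Mergeable) Mergeable {
//		// codec.Clonable.Clone() returns interface{}; re-assert the
//		// capability the caller actually needs.
//		return m.Clone().(Mergeable)
//	}
//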
type Mergeable interface { + codec.Clonable + // Merge with other value in place. Returns change, that can be sent to other clients. // If merge doesn't result in any change, returns nil. // Error can be returned if merging with given 'other' value is not possible. @@ -35,17 +41,14 @@ type Mergeable interface { // used when doing CAS operation) Merge(other Mergeable, localCAS bool) (change Mergeable, error error) - // Describes the content of this mergeable value. Used by memberlist client to decide if + // MergeContent describes the content of this mergeable value. Used by memberlist client to decide if // one change-value can invalidate some other value, that was received previously. // Invalidation can happen only if output of MergeContent is a superset of some other MergeContent. MergeContent() []string - // Remove tombstones older than given limit from this mergeable. + // RemoveTombstones removes tombstones older than given limit from this mergeable. // If limit is zero time, remove all tombstones. Memberlist client calls this method with zero limit each // time when client is accessing value from the store. It can be used to hide tombstones from the clients. // Returns the total number of tombstones present and the number of removed tombstones by this invocation. RemoveTombstones(limit time.Time) (total, removed int) - - // Clone should return a deep copy of the state. - Clone() Mergeable } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go index 66fe9fa91..38ec3b59c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go @@ -3,6 +3,7 @@ package kv import ( "context" "strconv" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -106,3 +107,7 @@ func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, return nil }) } + +func (m metrics) LastUpdateTime(key string) time.Time { + return m.c.LastUpdateTime(key) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go index 5b446d854..cbe23106a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go @@ -2,6 +2,7 @@ package kv import ( "context" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -37,3 +38,7 @@ func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{} func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { } + +func (m mockClient) LastUpdateTime(key string) time.Time { + return time.Now().UTC() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go index 8a3382e98..e4ac994d7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go @@ -335,6 +335,11 @@ func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(str }) } +func (m *MultiClient) LastUpdateTime(key string) time.Time { + _, kv := m.getPrimaryClient() + return kv.client.LastUpdateTime(key) +} + func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue interface{}) { if m.mirrorTimeout > 0 { var cfn context.CancelFunc diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go index 5775cf17b..aba9b7a09 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strings" + "time" ) type prefixedKVClient struct { @@ -61,3 +62,7 @@ func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, er func (c *prefixedKVClient) Delete(ctx context.Context, key string) error { return c.client.Delete(ctx, c.prefix+key) } + +func (c *prefixedKVClient) LastUpdateTime(key string) time.Time { + return c.client.LastUpdateTime(c.prefix + key) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index f49a5b65d..9bf69bff7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -247,7 +247,7 @@ func (i *Lifecycler) checkRingHealthForReadiness(ctx context.Context) error { } if i.cfg.ReadinessCheckRingHealth { - if err := ringDesc.IsReady(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { + if err := ringDesc.IsReady(i.KVStore.LastUpdateTime(i.RingKey), i.cfg.RingConfig.HeartbeatTimeout); err != nil { level.Warn(i.logger).Log("msg", "found an existing instance(s) with a problem in the ring, "+ "this instance cannot become ready until this problem is resolved. "+ "The /ring http endpoint on the distributor (or single binary) provides visibility into the ring.", @@ -260,7 +260,7 @@ func (i *Lifecycler) checkRingHealthForReadiness(ctx context.Context) error { return fmt.Errorf("instance %s not found in the ring", i.ID) } - if err := instance.IsReady(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { + if err := instance.IsReady(i.KVStore.LastUpdateTime(i.RingKey), i.cfg.RingConfig.HeartbeatTimeout); err != nil { return err } } @@ -622,7 +622,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { // OR unregister_on_shutdown=false // if autoJoinOnStartup, move it into ACTIVE to ensure the ingester joins the ring. // else set to PENDING - if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) == i.cfg.NumTokens { + if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) != 0 { if i.autoJoinOnStartup { instanceDesc.State = ACTIVE } else { @@ -818,13 +818,13 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) { zones := map[string]struct{}{} if ringDesc != nil { - now := time.Now() + lastUpdated := i.KVStore.LastUpdateTime(i.RingKey) for _, ingester := range ringDesc.Ingesters { zones[ingester.Zone] = struct{}{} // Count the number of healthy instances for Write operation. - if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, now) { + if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, lastUpdated) { healthyInstancesCount++ } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go index 8b07cb09d..88fcdd2ad 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go @@ -100,10 +100,10 @@ func (d *Desc) FindIngestersByState(state InstanceState) []InstanceDesc { // IsReady returns no error when all instance are ACTIVE and healthy, // and the ring has some tokens. 
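// The lifecycler.go hunks above and the model.go hunks below replace
// time.Now() with the KV store's LastUpdateTime(ringKey) when judging
// heartbeats. An illustrative before/after, using the names from this patch:
//
//	// Before: a stale ring snapshot compared against the wall clock could
//	// report every instance as past its heartbeat timeout.
//	ok := now.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout
//
//	// After: heartbeats are compared against the time the backing store
//	// last changed, so a consistent-but-cached snapshot stays usable.
//	ok = storageLastUpdated.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout
//
// Backends with no notion of snapshot age (etcd, memberlist, the mock client)
// simply return time.Now().UTC() from LastUpdateTime, preserving the old
// behaviour.
//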
-func (d *Desc) IsReady(now time.Time, heartbeatTimeout time.Duration) error { +func (d *Desc) IsReady(storageLastUpdated time.Time, heartbeatTimeout time.Duration) error { numTokens := 0 for _, instance := range d.Ingesters { - if err := instance.IsReady(now, heartbeatTimeout); err != nil { + if err := instance.IsReady(storageLastUpdated, heartbeatTimeout); err != nil { return err } numTokens += len(instance.Tokens) @@ -133,24 +133,24 @@ func (i *InstanceDesc) GetRegisteredAt() time.Time { return time.Unix(i.RegisteredTimestamp, 0) } -func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool { +func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, storageLastUpdated time.Time) bool { healthy := op.IsInstanceInStateHealthy(i.State) - return healthy && i.IsHeartbeatHealthy(heartbeatTimeout, now) + return healthy && i.IsHeartbeatHealthy(heartbeatTimeout, storageLastUpdated) } // IsHeartbeatHealthy returns whether the heartbeat timestamp for the ingester is within the // specified timeout period. A timeout of zero disables the timeout; the heartbeat is ignored. -func (i *InstanceDesc) IsHeartbeatHealthy(heartbeatTimeout time.Duration, now time.Time) bool { +func (i *InstanceDesc) IsHeartbeatHealthy(heartbeatTimeout time.Duration, storageLastUpdated time.Time) bool { if heartbeatTimeout == 0 { return true } - return now.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout + return storageLastUpdated.Sub(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout } // IsReady returns no error if the instance is ACTIVE and healthy. -func (i *InstanceDesc) IsReady(now time.Time, heartbeatTimeout time.Duration) error { - if !i.IsHeartbeatHealthy(heartbeatTimeout, now) { +func (i *InstanceDesc) IsReady(storageLastUpdated time.Time, heartbeatTimeout time.Duration) error { + if !i.IsHeartbeatHealthy(heartbeatTimeout, storageLastUpdated) { return fmt.Errorf("instance %s past heartbeat timeout", i.Addr) } if i.State != ACTIVE { @@ -439,7 +439,7 @@ func (d *Desc) RemoveTombstones(limit time.Time) (total, removed int) { } // Clone returns a deep copy of the ring state. -func (d *Desc) Clone() memberlist.Mergeable { +func (d *Desc) Clone() interface{} { return proto.Clone(d).(*Desc) } @@ -649,3 +649,91 @@ func MergeTokensByZone(zones map[string][][]uint32) map[string][]uint32 { } return out } + +func (d *Desc) SplitByID() map[string]interface{} { + out := make(map[string]interface{}, len(d.Ingesters)) + for key := range d.Ingesters { + in := d.Ingesters[key] + out[key] = &in + } + return out +} + +func (d *Desc) JoinIds(in map[string]interface{}) { + for key, value := range in { + d.Ingesters[key] = *(value.(*InstanceDesc)) + } +} + +func (d *Desc) GetItemFactory() proto.Message { + return &InstanceDesc{} +} + +func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) { + out, ok := o.(*Desc) + if !ok { + // This method only deals with non-nil rings. 
+ return nil, nil, fmt.Errorf("expected *ring.Desc, got %T", o) + } + + toUpdated := NewDesc() + toDelete := make([]string, 0) + tokensChanged := false + + // If both rings are nil + if d == nil && out == nil { + return toUpdated, toDelete, nil + } + + // If new data is empty + if out == nil { + for k := range d.Ingesters { + toDelete = append(toDelete, k) + } + return toUpdated, toDelete, nil + } + + // If the existing data is empty + if d == nil { + for key, value := range out.Ingesters { + toUpdated.Ingesters[key] = value + } + return toUpdated, toDelete, nil + } + + // If new instances were added + for name, oing := range out.Ingesters { + if _, ok := d.Ingesters[name]; !ok { + tokensChanged = true + toUpdated.Ingesters[name] = oing + } + } + + // If removed or updated + for name, ing := range d.Ingesters { + oing, ok := out.Ingesters[name] + if !ok { + toDelete = append(toDelete, name) + } else if !ing.Equal(oing) { + if !tokensEqual(ing.Tokens, oing.Tokens) { + tokensChanged = true + } + toUpdated.Ingesters[name] = oing + } + } + + // resolveConflicts allocates a lot of memory, so if we can avoid it, do that. + if tokensChanged && conflictingTokensExist(out.Ingesters) { + resolveConflicts(out.Ingesters) + + // Recheck if any instance was updated by resolveConflicts + for name, oing := range out.Ingesters { + ing, ok := d.Ingesters[name] + if !ok || !ing.Equal(oing) { + toUpdated.Ingesters[name] = oing + } + } + } + + return toUpdated, toDelete, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go index 32eff1384..df57448a8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go @@ -10,7 +10,7 @@ type ReplicationStrategy interface { // Filter out unhealthy instances and checks if there're enough instances // for an operation to succeed. Returns an error if there are not enough // instances. - Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []InstanceDesc, maxFailures int, err error) + Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool, storageLastUpdate time.Time) (healthy []InstanceDesc, maxFailures int, err error) } type defaultReplicationStrategy struct{} @@ -25,13 +25,11 @@ func NewDefaultReplicationStrategy() ReplicationStrategy { // - Filters out unhealthy instances so the one doesn't even try to write to them. // - Checks there are enough instances for an operation to succeed. // The instances argument may be overwritten. -func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]InstanceDesc, int, error) { - now := time.Now() - +func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool, storageLastUpdate time.Time) ([]InstanceDesc, int, error) { // Skip those that have not heartbeated in a while. 
var unhealthy []string for i := 0; i < len(instances); { - if instances[i].IsHealthy(op, heartbeatTimeout, now) { + if instances[i].IsHealthy(op, heartbeatTimeout, storageLastUpdate) { i++ } else { unhealthy = append(unhealthy, instances[i].Addr) @@ -74,12 +72,11 @@ func NewIgnoreUnhealthyInstancesReplicationStrategy() ReplicationStrategy { return &ignoreUnhealthyInstancesReplicationStrategy{} } -func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []InstanceDesc, maxFailures int, err error) { - now := time.Now() +func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool, storageLastUpdate time.Time) (healthy []InstanceDesc, maxFailures int, err error) { // Filter out unhealthy instances. var unhealthy []string for i := 0; i < len(instances); { - if instances[i].IsHealthy(op, heartbeatTimeout, now) { + if instances[i].IsHealthy(op, heartbeatTimeout, storageLastUpdate) { i++ } else { unhealthy = append(unhealthy, instances[i].Addr) @@ -99,8 +96,8 @@ func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []Instanc return instances, len(instances) - 1, nil } -func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, now time.Time) bool { - return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, now) +func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, storageLastUpdate time.Time) bool { + return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, storageLastUpdate) } // ReplicationFactor of the ring. diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index 51bc6231f..9f4cab46b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -19,7 +19,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" shardUtil "github.com/cortexproject/cortex/pkg/ring/shard" - "github.com/cortexproject/cortex/pkg/ring/util" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" utilmath "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/services" @@ -395,7 +395,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, instances = append(instances, instance) } - healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled) + healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled, r.KVClient.LastUpdateTime(r.key)) if err != nil { return ReplicationSet{}, err } @@ -415,10 +415,10 @@ func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) { return ReplicationSet{}, ErrEmptyRing } - now := time.Now() + storageLastUpdate := r.KVClient.LastUpdateTime(r.key) instances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) for _, instance := range r.ringDesc.Ingesters { - if r.IsHealthy(&instance, op, now) { + if r.IsHealthy(&instance, op, storageLastUpdate) { instances = append(instances, instance) } } @@ -441,10 +441,10 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro // Build the initial replication set, excluding unhealthy instances. 
healthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) zoneFailures := make(map[string]struct{}) - now := time.Now() + storageLastUpdate := r.KVClient.LastUpdateTime(r.key) for _, instance := range r.ringDesc.Ingesters { - if r.IsHealthy(&instance, op, now) { + if r.IsHealthy(&instance, op, storageLastUpdate) { healthyInstances = append(healthyInstances, instance) } else { zoneFailures[instance.Zone] = struct{}{} @@ -559,7 +559,7 @@ func (r *Ring) updateRingMetrics(compareResult CompareResult) { for _, instance := range r.ringDesc.Ingesters { s := instance.State.String() - if !r.IsHealthy(&instance, Reporting, time.Now()) { + if !r.IsHealthy(&instance, Reporting, r.KVClient.LastUpdateTime(r.key)) { s = unhealthy } numByState[s]++ @@ -744,6 +744,7 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur ringTokens: shardDesc.GetTokens(), ringTokensByZone: shardTokensByZone, ringZones: getZones(shardTokensByZone), + KVClient: r.KVClient, // We reference the original map as is in order to avoid copying. It's safe to do // because this map is immutable by design and it's a superset of the actual instances diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go index 453ac4c2e..bba74142c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go @@ -210,34 +210,33 @@ func init() { func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x6e, 0xd3, 0x40, - 0x18, 0x84, 0xf7, 0x8f, 0x37, 0xae, 0xf3, 0x87, 0x56, 0xd6, 0x16, 0x21, 0x53, 0xa1, 0xc5, 0xea, - 0xc9, 0x20, 0xe1, 0x8a, 0xc0, 0x01, 0x21, 0x71, 0x68, 0xa9, 0x41, 0xb6, 0xa2, 0x50, 0x99, 0xa8, - 0x57, 0xe4, 0x24, 0x8b, 0xb1, 0x4a, 0xec, 0xca, 0xde, 0x20, 0x95, 0x13, 0x8f, 0xc0, 0x0b, 0x70, - 0xe7, 0x51, 0x7a, 0xcc, 0x09, 0xf5, 0x84, 0x88, 0x73, 0xe1, 0xd8, 0x47, 0x40, 0xbb, 0x6e, 0x65, - 0x72, 0x9b, 0xcf, 0x33, 0xff, 0x8c, 0x2d, 0x19, 0xb1, 0xcc, 0xf2, 0xd4, 0x3f, 0x2f, 0x0b, 0x59, - 0x30, 0xaa, 0xf4, 0xde, 0x93, 0x34, 0x93, 0x9f, 0x16, 0x13, 0x7f, 0x5a, 0xcc, 0x0f, 0xd2, 0x22, - 0x2d, 0x0e, 0xb4, 0x39, 0x59, 0x7c, 0xd4, 0xa4, 0x41, 0xab, 0xe6, 0x68, 0xff, 0x07, 0x20, 0x3d, - 0x16, 0xd5, 0x94, 0xbd, 0xc2, 0x5e, 0x96, 0xa7, 0xa2, 0x92, 0xa2, 0xac, 0x1c, 0x70, 0x0d, 0xaf, - 0x3f, 0xb8, 0xef, 0xeb, 0x76, 0x65, 0xfb, 0xe1, 0xad, 0x17, 0xe4, 0xb2, 0xbc, 0x38, 0xa2, 0x97, - 0xbf, 0x1f, 0x92, 0xb8, 0xbd, 0xd8, 0x3b, 0xc1, 0x9d, 0xcd, 0x08, 0xb3, 0xd1, 0x38, 0x13, 0x17, - 0x0e, 0xb8, 0xe0, 0xf5, 0x62, 0x25, 0x99, 0x87, 0xdd, 0x2f, 0xc9, 0xe7, 0x85, 0x70, 0x3a, 0x2e, - 0x78, 0xfd, 0x01, 0x6b, 0xea, 0xc3, 0xbc, 0x92, 0x49, 0x3e, 0x15, 0x6a, 0x26, 0x6e, 0x02, 0x2f, - 0x3b, 0x2f, 0x20, 0xa2, 0x56, 0xc7, 0x36, 0xf6, 0x7f, 0x01, 0xde, 0xf9, 0x3f, 0xc1, 0x18, 0xd2, - 0x64, 0x36, 0x2b, 0x6f, 0x7a, 0xb5, 0x66, 0x0f, 0xb0, 0x27, 0xb3, 0xb9, 0xa8, 0x64, 0x32, 0x3f, - 0xd7, 0xe5, 0x46, 0xdc, 0x3e, 0x60, 0x8f, 0xb0, 0x5b, 0xc9, 0x44, 0x0a, 0xc7, 0x70, 0xc1, 0xdb, - 0x19, 0xec, 0x6e, 0xce, 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, 0x26, 0xf2, - 0xca, 0x31, 0x5d, 0xc3, 0xdb, 0x8e, 0x6f, 0x48, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, 0x35, 0xa3, - 0x4a, 0xb3, 0xa7, 0x78, 0xb7, 0x14, 0x69, 0xa6, 0xbe, 0x58, 0xcc, 0x3e, 0xb4, 0xfb, 0x96, 0xde, - 0xdf, 0x6d, 0xbd, 
0xf1, 0xad, 0x15, 0x51, 0x8b, 0xda, 0xdd, 0x88, 0x5a, 0x5d, 0xdb, 0x7c, 0x3c, - 0xc4, 0xed, 0x8d, 0x57, 0x60, 0x88, 0xe6, 0xe1, 0xeb, 0x71, 0x78, 0x1a, 0xd8, 0x84, 0xf5, 0x71, - 0x6b, 0x18, 0x1c, 0x9e, 0x86, 0xa3, 0xb7, 0x36, 0x28, 0x38, 0x09, 0x46, 0xc7, 0x0a, 0x3a, 0x0a, - 0xa2, 0x77, 0xe1, 0x48, 0x81, 0xc1, 0x2c, 0xa4, 0xc3, 0xe0, 0xcd, 0xd8, 0xa6, 0x47, 0xcf, 0x97, - 0x2b, 0x4e, 0xae, 0x56, 0x9c, 0x5c, 0xaf, 0x38, 0x7c, 0xab, 0x39, 0xfc, 0xac, 0x39, 0x5c, 0xd6, - 0x1c, 0x96, 0x35, 0x87, 0x3f, 0x35, 0x87, 0xbf, 0x35, 0x27, 0xd7, 0x35, 0x87, 0xef, 0x6b, 0x4e, - 0x96, 0x6b, 0x4e, 0xae, 0xd6, 0x9c, 0x4c, 0x4c, 0xfd, 0x0f, 0x3c, 0xfb, 0x17, 0x00, 0x00, 0xff, - 0xff, 0x97, 0x76, 0x41, 0xaf, 0x46, 0x02, 0x00, 0x00, + // 409 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x8a, 0xd3, 0x50, + 0x18, 0x85, 0xef, 0x9f, 0xdc, 0x64, 0xd2, 0xbf, 0xce, 0x10, 0xee, 0x0c, 0x12, 0x07, 0xb9, 0x86, + 0x59, 0x45, 0x17, 0x15, 0xab, 0x0b, 0x11, 0x5c, 0xcc, 0x38, 0x51, 0x12, 0x4a, 0x1d, 0x62, 0x99, + 0xad, 0xc4, 0xe9, 0x25, 0x84, 0xb1, 0x49, 0x49, 0xae, 0x42, 0x5d, 0xf9, 0x08, 0xbe, 0x80, 0x7b, + 0x1f, 0xa5, 0xcb, 0xae, 0xa4, 0x2b, 0xb1, 0xe9, 0xc6, 0x65, 0x1f, 0x41, 0x6e, 0xd2, 0x92, 0xe9, + 0xee, 0x9c, 0xff, 0x9c, 0x9c, 0x2f, 0x81, 0x20, 0x16, 0x69, 0x96, 0xf4, 0xa6, 0x45, 0x2e, 0x73, + 0x46, 0x95, 0x3e, 0x3d, 0x49, 0xf2, 0x24, 0xaf, 0x0f, 0x4f, 0x95, 0x6a, 0xb2, 0xb3, 0x9f, 0x80, + 0xf4, 0x52, 0x94, 0x37, 0xec, 0x35, 0x76, 0xd2, 0x2c, 0x11, 0xa5, 0x14, 0x45, 0xe9, 0x80, 0xab, + 0x7b, 0xdd, 0xfe, 0x83, 0x5e, 0x3d, 0xa2, 0xe2, 0x5e, 0xb0, 0xcb, 0xfc, 0x4c, 0x16, 0xb3, 0x0b, + 0x3a, 0xff, 0xf3, 0x88, 0x44, 0xed, 0x13, 0xa7, 0x57, 0x78, 0xb4, 0x5f, 0x61, 0x36, 0xea, 0xb7, + 0x62, 0xe6, 0x80, 0x0b, 0x5e, 0x27, 0x52, 0x92, 0x79, 0x68, 0x7c, 0x8d, 0x3f, 0x7f, 0x11, 0x8e, + 0xe6, 0x82, 0xd7, 0xed, 0xb3, 0x66, 0x3e, 0xc8, 0x4a, 0x19, 0x67, 0x37, 0x42, 0x61, 0xa2, 0xa6, + 0xf0, 0x4a, 0x7b, 0x09, 0x21, 0xb5, 0x34, 0x5b, 0x3f, 0xfb, 0x0d, 0x78, 0xef, 0x6e, 0x83, 0x31, + 0xa4, 0xf1, 0x78, 0x5c, 0x6c, 0x77, 0x6b, 0xcd, 0x1e, 0x62, 0x47, 0xa6, 0x13, 0x51, 0xca, 0x78, + 0x32, 0xad, 0xc7, 0xf5, 0xa8, 0x3d, 0xb0, 0xc7, 0x68, 0x94, 0x32, 0x96, 0xc2, 0xd1, 0x5d, 0xf0, + 0x8e, 0xfa, 0xc7, 0xfb, 0xd8, 0x0f, 0x2a, 0x8a, 0x9a, 0x06, 0xbb, 0x8f, 0xa6, 0xcc, 0x6f, 0x45, + 0x56, 0x3a, 0xa6, 0xab, 0x7b, 0x87, 0xd1, 0xd6, 0x29, 0xe8, 0xb7, 0x3c, 0x13, 0xce, 0x41, 0x03, + 0x55, 0x9a, 0x3d, 0xc3, 0x93, 0x42, 0x24, 0xa9, 0xfa, 0x62, 0x31, 0xfe, 0xd8, 0xf2, 0xad, 0x9a, + 0x7f, 0xdc, 0x66, 0xa3, 0x5d, 0x14, 0x52, 0x8b, 0xda, 0x46, 0x48, 0x2d, 0xc3, 0x36, 0x9f, 0x0c, + 0xf0, 0x70, 0xef, 0x15, 0x18, 0xa2, 0x79, 0xfe, 0x66, 0x14, 0x5c, 0xfb, 0x36, 0x61, 0x5d, 0x3c, + 0x18, 0xf8, 0xe7, 0xd7, 0xc1, 0xf0, 0x9d, 0x0d, 0xca, 0x5c, 0xf9, 0xc3, 0x4b, 0x65, 0x34, 0x65, + 0xc2, 0xf7, 0xc1, 0x50, 0x19, 0x9d, 0x59, 0x48, 0x07, 0xfe, 0xdb, 0x91, 0x4d, 0x2f, 0x5e, 0x2c, + 0x56, 0x9c, 0x2c, 0x57, 0x9c, 0x6c, 0x56, 0x1c, 0xbe, 0x57, 0x1c, 0x7e, 0x55, 0x1c, 0xe6, 0x15, + 0x87, 0x45, 0xc5, 0xe1, 0x6f, 0xc5, 0xe1, 0x5f, 0xc5, 0xc9, 0xa6, 0xe2, 0xf0, 0x63, 0xcd, 0xc9, + 0x62, 0xcd, 0xc9, 0x72, 0xcd, 0xc9, 0x27, 0xb3, 0xfe, 0x07, 0x9e, 0xff, 0x0f, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0x1c, 0x09, 0x3a, 0x2d, 0x02, 0x00, 0x00, } func (x InstanceState) String() string { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto index 8f464dc2c..5dfeea8fa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto +++ 
b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package ring; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util/string_utils.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util/string_utils.go deleted file mode 100644 index 39868e1d1..000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/util/string_utils.go +++ /dev/null @@ -1,12 +0,0 @@ -package util - -// StringsContain returns true if the search value is within the list of input values. -func StringsContain(values []string, search string) bool { - for _, v := range values { - if search == v { - return true - } - } - - return false -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go index 89d101015..ed76d835e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go @@ -1,12 +1,24 @@ package s3 import ( + "context" + "fmt" + "io" + "time" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/thanos-io/objstore" "github.com/thanos-io/objstore/providers/s3" + + "github.com/cortexproject/cortex/pkg/util/backoff" ) +var defaultOperationRetries = 5 +var defaultRetryMinBackoff = 5 * time.Second +var defaultRetryMaxBackoff = 1 * time.Minute + // NewBucketClient creates a new S3 bucket client func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { s3Cfg, err := newS3Config(cfg) @@ -14,7 +26,17 @@ func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucke return nil, err } - return s3.NewBucketWithConfig(logger, s3Cfg, name) + bucket, err := s3.NewBucketWithConfig(logger, s3Cfg, name) + if err != nil { + return nil, err + } + return &BucketWithRetries{ + logger: logger, + bucket: bucket, + operationRetries: defaultOperationRetries, + retryMinBackoff: defaultRetryMinBackoff, + retryMaxBackoff: defaultRetryMaxBackoff, + }, nil } // NewBucketReaderClient creates a new S3 bucket client @@ -24,7 +46,17 @@ func NewBucketReaderClient(cfg Config, name string, logger log.Logger) (objstore return nil, err } - return s3.NewBucketWithConfig(logger, s3Cfg, name) + bucket, err := s3.NewBucketWithConfig(logger, s3Cfg, name) + if err != nil { + return nil, err + } + return &BucketWithRetries{ + logger: logger, + bucket: bucket, + operationRetries: defaultOperationRetries, + retryMinBackoff: defaultRetryMinBackoff, + retryMaxBackoff: defaultRetryMaxBackoff, + }, nil } func newS3Config(cfg Config) (s3.Config, error) { @@ -62,3 +94,106 @@ func newS3Config(cfg Config) (s3.Config, error) { AWSSDKAuth: cfg.AccessKeyID == "", }, nil } + +type BucketWithRetries struct { + logger log.Logger + bucket objstore.Bucket + operationRetries int + retryMinBackoff time.Duration + retryMaxBackoff time.Duration +} + +func (b *BucketWithRetries) retry(ctx context.Context, f func() error, operationInfo string) error { + var lastErr error + retries := backoff.New(ctx, backoff.Config{ + MinBackoff: b.retryMinBackoff, + MaxBackoff: b.retryMaxBackoff, + MaxRetries: b.operationRetries, + }) + for retries.Ongoing() { + lastErr = f() + if lastErr == nil { + return nil + } + if 
b.bucket.IsObjNotFoundErr(lastErr) { + return lastErr + } + retries.Wait() + } + if lastErr != nil { + level.Error(b.logger).Log("msg", "bucket operation failed after retries", "err", lastErr, "operation", operationInfo) + return lastErr + } + return retries.Err() +} + +func (b *BucketWithRetries) Name() string { + return b.bucket.Name() +} + +func (b *BucketWithRetries) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { + return b.retry(ctx, func() error { + return b.bucket.Iter(ctx, dir, f, options...) + }, fmt.Sprintf("Iter %s", dir)) +} + +func (b *BucketWithRetries) Get(ctx context.Context, name string) (reader io.ReadCloser, err error) { + err = b.retry(ctx, func() error { + reader, err = b.bucket.Get(ctx, name) + return err + }, fmt.Sprintf("Get %s", name)) + return +} + +func (b *BucketWithRetries) GetRange(ctx context.Context, name string, off, length int64) (closer io.ReadCloser, err error) { + err = b.retry(ctx, func() error { + closer, err = b.bucket.GetRange(ctx, name, off, length) + return err + }, fmt.Sprintf("GetRange %s (off: %d, length: %d)", name, off, length)) + return +} + +func (b *BucketWithRetries) Exists(ctx context.Context, name string) (exists bool, err error) { + err = b.retry(ctx, func() error { + exists, err = b.bucket.Exists(ctx, name) + return err + }, fmt.Sprintf("Exists %s", name)) + return +} + +func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader) error { + rs, ok := r.(io.ReadSeeker) + if !ok { + // Skip retry if the incoming Reader is not seekable, to avoid + // loading the entire content into memory. + return b.bucket.Upload(ctx, name, r) + } + return b.retry(ctx, func() error { + if _, err := rs.Seek(0, io.SeekStart); err != nil { + return err + } + return b.bucket.Upload(ctx, name, rs) + }, fmt.Sprintf("Upload %s", name)) +} + +func (b *BucketWithRetries) Attributes(ctx context.Context, name string) (attributes objstore.ObjectAttributes, err error) { + err = b.retry(ctx, func() error { + attributes, err = b.bucket.Attributes(ctx, name) + return err + }, fmt.Sprintf("Attributes %s", name)) + return +} + +func (b *BucketWithRetries) Delete(ctx context.Context, name string) error { + return b.retry(ctx, func() error { + return b.bucket.Delete(ctx, name) + }, fmt.Sprintf("Delete %s", name)) +} + +func (b *BucketWithRetries) IsObjNotFoundErr(err error) bool { + return b.bucket.IsObjNotFoundErr(err) +} + +func (b *BucketWithRetries) Close() error { + return b.bucket.Close() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go index a0454adc1..b468e31d5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go @@ -23,25 +23,26 @@ import ( const ( CacheBackendMemcached = "memcached" + CacheBackendRedis = "redis" ) type CacheBackend struct { Backend string `yaml:"backend"` Memcached MemcachedClientConfig `yaml:"memcached"` + Redis RedisClientConfig `yaml:"redis"` } // Validate the config. 
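// A usage sketch for the retrying wrapper above; inner (an objstore.Bucket),
// logger, ctx and data are assumed to exist. Upload is only retried when the
// reader can be rewound, hence the bytes.Reader (an io.ReadSeeker):
//
//	b := &BucketWithRetries{
//		logger:           logger,
//		bucket:           inner,
//		operationRetries: defaultOperationRetries, // 5 attempts
//		retryMinBackoff:  defaultRetryMinBackoff,  // 5s
//		retryMaxBackoff:  defaultRetryMaxBackoff,  // 1m
//	}
//	// Object-not-found errors are returned immediately, without retrying.
//	err := b.Upload(ctx, "tenant/block/meta.json", bytes.NewReader(data))
//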
func (cfg *CacheBackend) Validate() error { - if cfg.Backend != "" && cfg.Backend != CacheBackendMemcached { + switch cfg.Backend { + case CacheBackendMemcached: + return cfg.Memcached.Validate() + case CacheBackendRedis: + return cfg.Redis.Validate() + case "": + default: return fmt.Errorf("unsupported cache backend: %s", cfg.Backend) } - - if cfg.Backend == CacheBackendMemcached { - if err := cfg.Memcached.Validate(); err != nil { - return err - } - } - return nil } @@ -58,6 +59,7 @@ func (cfg *ChunksCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("Backend for chunks cache, if not empty. Supported values: %s.", CacheBackendMemcached)) cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") + cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.") f.Int64Var(&cfg.SubrangeSize, prefix+"subrange-size", 16000, "Size of each subrange that bucket object is split into for better caching.") f.IntVar(&cfg.MaxGetRangeRequests, prefix+"max-get-range-requests", 3, "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching chunks. Zero or negative value = unlimited number of sub-requests.") @@ -89,6 +91,7 @@ func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("Backend for metadata cache, if not empty. Supported values: %s.", CacheBackendMemcached)) cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") + cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.") f.DurationVar(&cfg.TenantsListTTL, prefix+"tenants-list-ttl", 15*time.Minute, "How long to cache list of tenants in the bucket.") f.DurationVar(&cfg.TenantBlocksListTTL, prefix+"tenant-blocks-list-ttl", 5*time.Minute, "How long to cache list of blocks for each tenant.") @@ -111,7 +114,7 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata cfg := cache.NewCachingBucketConfig() cachingConfigured := false - chunksCache, err := createCache("chunks-cache", chunksConfig.Backend, chunksConfig.Memcached, logger, reg) + chunksCache, err := createCache("chunks-cache", &chunksConfig.CacheBackend, logger, reg) if err != nil { return nil, errors.Wrapf(err, "chunks-cache") } @@ -121,7 +124,7 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata cfg.CacheGetRange("chunks", chunksCache, isTSDBChunkFile, chunksConfig.SubrangeSize, chunksConfig.AttributesTTL, chunksConfig.SubrangeTTL, chunksConfig.MaxGetRangeRequests) } - metadataCache, err := createCache("metadata-cache", metadataConfig.Backend, metadataConfig.Memcached, logger, reg) + metadataCache, err := createCache("metadata-cache", &metadataConfig.CacheBackend, logger, reg) if err != nil { return nil, errors.Wrapf(err, "metadata-cache") } @@ -149,22 +152,29 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata return storecache.NewCachingBucket(bkt, cfg, logger, reg) } -func createCache(cacheName string, backend string, memcached MemcachedClientConfig, logger log.Logger, reg prometheus.Registerer) (cache.Cache, error) { - switch backend { +func createCache(cacheName string, cacheBackend *CacheBackend, logger log.Logger, reg prometheus.Registerer) (cache.Cache, error) { + switch cacheBackend.Backend { case "": // No caching. 
return nil, nil case CacheBackendMemcached: var client cacheutil.MemcachedClient - client, err := cacheutil.NewMemcachedClientWithConfig(logger, cacheName, memcached.ToMemcachedClientConfig(), reg) + client, err := cacheutil.NewMemcachedClientWithConfig(logger, cacheName, cacheBackend.Memcached.ToMemcachedClientConfig(), reg) if err != nil { return nil, errors.Wrapf(err, "failed to create memcached client") } return cache.NewMemcachedCache(cacheName, logger, client, reg), nil + case CacheBackendRedis: + redisCache, err := cacheutil.NewRedisClientWithConfig(logger, cacheName, cacheBackend.Redis.ToRedisClientConfig(), reg) + if err != nil { + return nil, errors.Wrapf(err, "failed to create redis client") + } + return cache.NewRedisCache(cacheName, logger, redisCache, reg), nil + default: - return nil, errors.Errorf("unsupported cache type for cache %s: %s", cacheName, backend) + return nil, errors.Errorf("unsupported cache type for cache %s: %s", cacheName, cacheBackend.Backend) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 2fcc0cded..48753108f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -8,8 +8,9 @@ import ( "github.com/alecthomas/units" "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/wal" + "github.com/prometheus/prometheus/tsdb/wlog" "github.com/thanos-io/thanos/pkg/store" "github.com/cortexproject/cortex/pkg/storage/bucket" @@ -24,10 +25,6 @@ const ( // set when shipping blocks to the storage. IngesterIDExternalLabel = "__ingester_id__" - // ShardIDExternalLabel is the external label containing the shard ID - // and can be used to shard blocks. - ShardIDExternalLabel = "__shard_id__" - // How often are open TSDBs checked for being idle and closed. DefaultCloseIdleTSDBInterval = 5 * time.Minute @@ -49,6 +46,7 @@ var ( errInvalidCompactionConcurrency = errors.New("invalid TSDB compaction concurrency") errInvalidWALSegmentSizeBytes = errors.New("invalid TSDB WAL segment size bytes") errInvalidStripeSize = errors.New("invalid TSDB stripe size") + errInvalidOutOfOrderCapMax = errors.New("invalid TSDB OOO chunks capacity (in samples)") errEmptyBlockranges = errors.New("empty block ranges for TSDB") ) @@ -136,6 +134,8 @@ type TSDBConfig struct { WALSegmentSizeBytes int `yaml:"wal_segment_size_bytes"` FlushBlocksOnShutdown bool `yaml:"flush_blocks_on_shutdown"` CloseIdleTSDBTimeout time.Duration `yaml:"close_idle_tsdb_timeout"` + // The size of the in-memory queue used before flushing chunks to the disk. + HeadChunksWriteQueueSize int `yaml:"head_chunks_write_queue_size"` // MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup. MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"` @@ -147,8 +147,14 @@ type TSDBConfig struct { // How often to check for idle TSDBs for closing. DefaultCloseIdleTSDBInterval is not suitable for testing, so tests can override. CloseIdleTSDBInterval time.Duration `yaml:"-"` - // Positive value enables experiemental support for exemplars. 0 or less to disable. + // Positive value enables experimental support for exemplars. 0 or less to disable. MaxExemplars int `yaml:"max_exemplars"` + + // Enable snapshotting of in-memory TSDB data on disk when shutting down. 
+ MemorySnapshotOnShutdown bool `yaml:"memory_snapshot_on_shutdown"` + + // OutOfOrderCapMax is the maximum capacity for OOO chunks (in samples). + OutOfOrderCapMax int64 `yaml:"out_of_order_cap_max"` } // RegisterFlags registers the TSDBConfig flags. @@ -169,10 +175,13 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.HeadChunksWriteBufferSize, "blocks-storage.tsdb.head-chunks-write-buffer-size-bytes", chunks.DefaultWriteBufferSize, "The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.") f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") - f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wal.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).") + f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segment files max size (bytes).") f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 0, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.") - f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.") + f.IntVar(&cfg.HeadChunksWriteQueueSize, "blocks-storage.tsdb.head-chunks-write-queue-size", chunks.DefaultWriteQueueSize, "The size of the in-memory queue used before flushing chunks to the disk.") + f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, cortex will fall back to this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.") + f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.") + f.Int64Var(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", tsdb.DefaultOutOfOrderCapMax, "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.") } // Validate the config. 
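// An illustrative sketch (not from this patch) of driving the new flags
// through the standard flag package; the FlagSet wiring is hypothetical, the
// flag names are the ones registered above:
//
//	fs := flag.NewFlagSet("cortex", flag.ContinueOnError)
//	var cfg TSDBConfig
//	cfg.RegisterFlags(fs)
//	_ = fs.Parse([]string{
//		"-blocks-storage.tsdb.memory-snapshot-on-shutdown=true",
//		"-blocks-storage.tsdb.out-of-order-cap-max=32",
//	})
//	err := cfg.Validate() // rejects out-of-order-cap-max <= 0 (next hunk)
//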
@@ -209,6 +218,10 @@ func (cfg *TSDBConfig) Validate() error { return errInvalidWALSegmentSizeBytes } + if cfg.OutOfOrderCapMax <= 0 { + return errInvalidOutOfOrderCapMax + } + return nil } @@ -236,6 +249,7 @@ type BucketStoreConfig struct { ChunksCache ChunksCacheConfig `yaml:"chunks_cache"` MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` + IgnoreBlocksWithin time.Duration `yaml:"ignore_blocks_within"` BucketIndex BucketIndexConfig `yaml:"bucket_index"` // Chunk pool. @@ -279,6 +293,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "blocks-storage.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ "Default is 6h, half of the default value for -compactor.deletion-delay.") + f.DurationVar(&cfg.IgnoreBlocksWithin, "blocks-storage.bucket-store.ignore-blocks-within", 0, "The blocks created since `now() - ignore_blocks_within` will not be synced. This should be used together with `-querier.query-store-after` to filter out the blocks that are too new to be queried. A reasonable value for this flag would be `-querier.query-store-after - blocks-storage.bucket-store.bucket-index.max-stale-period` to give some buffer. 0 to disable.") f.IntVar(&cfg.PostingOffsetsInMemSampling, "blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") f.BoolVar(&cfg.IndexHeaderLazyLoadingEnabled, "blocks-storage.bucket-store.index-header-lazy-loading-enabled", false, "If enabled, store-gateway will lazily memory-map an index-header only once required by a query.") f.DurationVar(&cfg.IndexHeaderLazyLoadingIdleTimeout, "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout", 20*time.Minute, "If index-header lazy loading is enabled and this setting is > 0, the store-gateway will release memory-mapped index-headers after 'idle timeout' inactivity.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go index 60b347d14..ca4a54842 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go @@ -23,6 +23,9 @@ const ( // IndexCacheBackendMemcached is the value for the memcached index cache backend. IndexCacheBackendMemcached = "memcached" + // IndexCacheBackendRedis is the value for the redis index cache backend. + IndexCacheBackendRedis = "redis" + // IndexCacheBackendDefault is the value for the default index cache backend. 
IndexCacheBackendDefault = IndexCacheBackendInMemory @@ -30,7 +33,7 @@ const ( ) var ( - supportedIndexCacheBackends = []string{IndexCacheBackendInMemory, IndexCacheBackendMemcached} + supportedIndexCacheBackends = []string{IndexCacheBackendInMemory, IndexCacheBackendMemcached, IndexCacheBackendRedis} errUnsupportedIndexCacheBackend = errors.New("unsupported index cache backend") errNoIndexCacheAddresses = errors.New("no index cache backend addresses") @@ -40,6 +43,7 @@ type IndexCacheConfig struct { Backend string `yaml:"backend"` InMemory InMemoryIndexCacheConfig `yaml:"inmemory"` Memcached MemcachedClientConfig `yaml:"memcached"` + Redis RedisClientConfig `yaml:"redis"` } func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { @@ -51,6 +55,7 @@ func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix str cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.") cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") + cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.") } // Validate the config. @@ -63,6 +68,10 @@ func (cfg *IndexCacheConfig) Validate() error { if err := cfg.Memcached.Validate(); err != nil { return err } + } else if cfg.Backend == IndexCacheBackendRedis { + if err := cfg.Redis.Validate(); err != nil { + return err + } } return nil @@ -83,6 +92,8 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu return newInMemoryIndexCache(cfg.InMemory, logger, registerer) case IndexCacheBackendMemcached: return newMemcachedIndexCache(cfg.Memcached, logger, registerer) + case IndexCacheBackendRedis: + return newRedisIndexCache(cfg.Redis, logger, registerer) default: return nil, errUnsupportedIndexCacheBackend } @@ -111,3 +122,12 @@ func newMemcachedIndexCache(cfg MemcachedClientConfig, logger log.Logger, regist return storecache.NewMemcachedIndexCache(logger, client, registerer) } + +func newRedisIndexCache(cfg RedisClientConfig, logger log.Logger, registerer prometheus.Registerer) (storecache.IndexCache, error) { + client, err := cacheutil.NewRedisClientWithConfig(logger, "index-cache", cfg.ToRedisClientConfig(), registerer) + if err != nil { + return nil, errors.Wrapf(err, "create index cache redis client") + } + + return storecache.NewRemoteIndexCache(logger, client, registerer) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go new file mode 100644 index 000000000..9c0937d43 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go @@ -0,0 +1,107 @@ +package tsdb + +import ( + "flag" + "time" + + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/cacheutil" + + "github.com/cortexproject/cortex/pkg/util/tls" +) + +type RedisClientConfig struct { + Addresses string `yaml:"addresses"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DB int `yaml:"db"` + MasterName string `yaml:"master_name"` + + PoolSize int `yaml:"pool_size"` + MinIdleConns int `yaml:"min_idle_conns"` + MaxGetMultiConcurrency int `yaml:"max_get_multi_concurrency"` + GetMultiBatchSize int `yaml:"get_multi_batch_size"` + MaxSetMultiConcurrency int `yaml:"max_set_multi_concurrency"` + SetMultiBatchSize int `yaml:"set_multi_batch_size"` + + DialTimeout time.Duration `yaml:"dial_timeout"` + ReadTimeout time.Duration `yaml:"read_timeout"` + WriteTimeout time.Duration `yaml:"write_timeout"` + IdleTimeout time.Duration `yaml:"idle_timeout"` + MaxConnAge 
time.Duration `yaml:"max_conn_age"` + + TLSEnabled bool `yaml:"tls_enabled"` + TLS tls.ClientConfig `yaml:",inline"` + + // If not zero then client-side caching is enabled. + // Client-side caching is when data is stored in memory + // instead of fetching data each time. + // See https://redis.io/docs/manual/client-side-caching/ for info. + CacheSize int `yaml:"cache_size"` +} + +func (cfg *RedisClientConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Addresses, prefix+"addresses", "", "Comma separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).") + f.StringVar(&cfg.Username, prefix+"username", "", "Redis username.") + f.StringVar(&cfg.Password, prefix+"password", "", "Redis password.") + f.IntVar(&cfg.DB, prefix+"db", 0, "Database to be selected after connecting to the server.") + f.DurationVar(&cfg.DialTimeout, prefix+"dial-timeout", time.Second*5, "Client dial timeout.") + f.DurationVar(&cfg.ReadTimeout, prefix+"read-timeout", time.Second*3, "Client read timeout.") + f.DurationVar(&cfg.WriteTimeout, prefix+"write-timeout", time.Second*3, "Client write timeout.") + f.DurationVar(&cfg.IdleTimeout, prefix+"idle-timeout", time.Minute*5, "Amount of time after which client closes idle connections. Should be less than server's timeout. -1 disables idle timeout check.") + f.DurationVar(&cfg.MaxConnAge, prefix+"max-conn-age", 0, "Connection age at which client retires (closes) the connection. Default 0 is to not close aged connections.") + f.IntVar(&cfg.PoolSize, prefix+"pool-size", 100, "Maximum number of socket connections.") + f.IntVar(&cfg.MinIdleConns, prefix+"min-idle-conns", 10, "Specifies the minimum number of idle connections, which is useful when it is slow to establish new connections.") + f.IntVar(&cfg.MaxGetMultiConcurrency, prefix+"max-get-multi-concurrency", 100, "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.") + f.IntVar(&cfg.GetMultiBatchSize, prefix+"get-multi-batch-size", 100, "The maximum size per batch for mget.") + f.IntVar(&cfg.MaxSetMultiConcurrency, prefix+"max-set-multi-concurrency", 100, "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.") + f.IntVar(&cfg.SetMultiBatchSize, prefix+"set-multi-batch-size", 100, "The maximum size per batch for pipeline set.") + f.StringVar(&cfg.MasterName, prefix+"master-name", "", "Specifies the master's name. Must not be empty for Redis Sentinel.") + f.IntVar(&cfg.CacheSize, prefix+"cache-size", 0, "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.") + f.BoolVar(&cfg.TLSEnabled, prefix+"tls-enabled", false, "Whether to enable TLS for the redis connection.") + cfg.TLS.RegisterFlagsWithPrefix(prefix, f) +} + +// Validate the config. 
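// A minimal configuration sketch with illustrative values. Validate, just
// below, requires a non-empty address list and, when TLS is enabled, that a
// client key and certificate are provided together:
//
//	cfg := RedisClientConfig{
//		Addresses:  "redis-1:6379,redis-2:6379",
//		PoolSize:   100,
//		TLSEnabled: false,
//	}
//	if err := cfg.Validate(); err != nil {
//		// handle invalid configuration
//	}
//	rc := cfg.ToRedisClientConfig() // fed to cacheutil.NewRedisClientWithConfig
//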
+func (cfg *RedisClientConfig) Validate() error { + if cfg.Addresses == "" { + return errNoIndexCacheAddresses + } + + if cfg.TLSEnabled { + if (cfg.TLS.CertPath != "") != (cfg.TLS.KeyPath != "") { + return errors.New("both client key and certificate must be provided") + } + } + + return nil +} + +func (cfg *RedisClientConfig) ToRedisClientConfig() cacheutil.RedisClientConfig { + return cacheutil.RedisClientConfig{ + Addr: cfg.Addresses, + Username: cfg.Username, + Password: cfg.Password, + DB: cfg.DB, + MasterName: cfg.MasterName, + DialTimeout: cfg.DialTimeout, + ReadTimeout: cfg.ReadTimeout, + WriteTimeout: cfg.WriteTimeout, + PoolSize: cfg.PoolSize, + MinIdleConns: cfg.MinIdleConns, + IdleTimeout: cfg.IdleTimeout, + MaxConnAge: cfg.MaxConnAge, + MaxGetMultiConcurrency: cfg.MaxGetMultiConcurrency, + GetMultiBatchSize: cfg.GetMultiBatchSize, + MaxSetMultiConcurrency: cfg.MaxSetMultiConcurrency, + SetMultiBatchSize: cfg.SetMultiBatchSize, + TLSEnabled: cfg.TLSEnabled, + TLSConfig: cacheutil.TLSConfig{ + CAFile: cfg.TLS.CAPath, + KeyFile: cfg.TLS.KeyPath, + CertFile: cfg.TLS.CertPath, + ServerName: cfg.TLS.ServerName, + InsecureSkipVerify: cfg.TLS.InsecureSkipVerify, + }, + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go index 9e848ab06..cfec645c9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go @@ -13,6 +13,8 @@ import ( "github.com/cortexproject/cortex/pkg/util/backoff" "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy" + "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock" + "github.com/cortexproject/cortex/pkg/util/grpcencoding/zstd" "github.com/cortexproject/cortex/pkg/util/tls" ) @@ -40,7 +42,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") - f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)") + f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)") f.Float64Var(&cfg.RateLimit, prefix+".grpc-client-rate-limit", 0., "Rate limit for gRPC client; 0 means disabled.") f.IntVar(&cfg.RateLimitBurst, prefix+".grpc-client-rate-limit-burst", 0, "Rate limit burst for gRPC client.") f.BoolVar(&cfg.BackoffOnRatelimits, prefix+".backoff-on-ratelimits", false, "Enable backoff and retry when we hit ratelimits.") @@ -53,7 +55,7 @@ func (cfg *Config) Validate(log log.Logger) error { switch cfg.GRPCCompression { - case gzip.Name, snappy.Name, "": + case gzip.Name, snappy.Name, zstd.Name, snappyblock.Name, "": // valid default: return errors.Errorf("unsupported compression type: %s", cfg.GRPCCompression) diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go new file mode 100644 index 000000000..a40e8429d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go @@ -0,0 +1,143 @@ +package snappyblock + +import ( + "bytes" + "io" + "sync" + + "github.com/golang/snappy" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the snappy compressor. +const Name = "snappy-block" + +func init() { + encoding.RegisterCompressor(newCompressor()) +} + +type compressor struct { + writersPool sync.Pool + readersPool sync.Pool +} + +func newCompressor() *compressor { + c := &compressor{} + c.readersPool = sync.Pool{ + New: func() interface{} { + return &reader{ + pool: &c.readersPool, + cbuff: bytes.NewBuffer(make([]byte, 0, 512)), + } + }, + } + c.writersPool = sync.Pool{ + New: func() interface{} { + return &writeCloser{ + pool: &c.writersPool, + buff: bytes.NewBuffer(make([]byte, 0, 512)), + } + }, + } + return c +} + +func (c *compressor) Name() string { + return Name +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + wr := c.writersPool.Get().(*writeCloser) + wr.Reset(w) + return wr, nil +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + dr := c.readersPool.Get().(*reader) + err := dr.Reset(r) + if err != nil { + return nil, err + } + + return dr, nil +} + +// DecompressedSize is called by gRPC, when a Compressor implements +// DecompressedSize(compressedBytes []byte) int, to determine the size of the +// buffer allocated for the result of decompression. +// Return -1 to indicate unknown size. +// +// This is an EXPERIMENTAL feature of grpc-go. 
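// Usage sketch (the client and its Push RPC are hypothetical): importing the
// package runs init(), which registers the codec with gRPC, and
// grpc.UseCompressor opts an individual call into it:
//
//	import (
//		"google.golang.org/grpc"
//
//		"github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock"
//	)
//
//	resp, err := client.Push(ctx, req, grpc.UseCompressor(snappyblock.Name))
//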
+func (c *compressor) DecompressedSize(compressedBytes []byte) int { + decompressedSize, err := snappy.DecodedLen(compressedBytes) + if err != nil { + return -1 + } + return decompressedSize +} + +type writeCloser struct { + i io.Writer + pool *sync.Pool + buff *bytes.Buffer + + dst []byte +} + +func (w *writeCloser) Reset(i io.Writer) { + w.i = i +} + +func (w *writeCloser) Write(p []byte) (n int, err error) { + return w.buff.Write(p) +} + +func (w *writeCloser) Close() error { + defer func() { + w.buff.Reset() + w.dst = w.dst[0:cap(w.dst)] + w.pool.Put(w) + }() + + if w.i != nil { + w.dst = snappy.Encode(w.dst, w.buff.Bytes()) + _, err := w.i.Write(w.dst) + return err + } + + return nil +} + +type reader struct { + pool *sync.Pool + cbuff *bytes.Buffer + dbuff *bytes.Buffer + dst []byte +} + +func (r *reader) Reset(ir io.Reader) error { + _, err := r.cbuff.ReadFrom(ir) + + if err != nil { + return err + } + + r.dst, err = snappy.Decode(r.dst, r.cbuff.Bytes()) + + if err != nil { + return err + } + + r.dbuff = bytes.NewBuffer(r.dst) + return nil +} + +func (r *reader) Read(p []byte) (n int, err error) { + n, err = r.dbuff.Read(p) + if err == io.EOF { + r.cbuff.Reset() + r.dst = r.dst[0:cap(r.dst)] + r.pool.Put(r) + } + return n, err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/zstd/zstd.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/zstd/zstd.go new file mode 100644 index 000000000..c55c9f1a9 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/zstd/zstd.go @@ -0,0 +1,86 @@ +package zstd + +import ( + "bytes" + "io" + + "github.com/klauspost/compress/zstd" + "google.golang.org/grpc/encoding" +) + +const Name = "zstd" + +type compressor struct { + encoder *zstd.Encoder + decoder *zstd.Decoder +} + +func init() { + encoding.RegisterCompressor(newCompressor()) +} + +func newCompressor() *compressor { + enc, _ := zstd.NewWriter(nil) + dec, _ := zstd.NewReader(nil) + c := &compressor{ + encoder: enc, + decoder: dec, + } + return c +} + +// SetLevel updates the registered compressor to use a particular compression +// level. NOTE: this function must only be called from an init function, and +// is not threadsafe. +func SetLevel(level zstd.EncoderLevel) error { + c := encoding.GetCompressor(Name).(*compressor) + + enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) + if err != nil { + return err + } + + c.encoder = enc + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + return &zstdWriteCloser{ + enc: c.encoder, + writer: w, + }, nil +} + +type zstdWriteCloser struct { + enc *zstd.Encoder + writer io.Writer // Compressed data will be written here. + buf bytes.Buffer // Buffer uncompressed data here, compress on Close. 
+} + +func (z *zstdWriteCloser) Write(p []byte) (int, error) { + return z.buf.Write(p) +} + +func (z *zstdWriteCloser) Close() error { + compressed := z.enc.EncodeAll(z.buf.Bytes(), nil) + _, err := io.Copy(z.writer, bytes.NewReader(compressed)) + return err +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + compressed, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + uncompressed, err := c.decoder.DecodeAll(compressed, nil) + if err != nil { + return nil, err + } + + return bytes.NewReader(uncompressed), nil +} + +func (c *compressor) Name() string { + return Name +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 24869b155..39e49b2be 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -55,6 +55,7 @@ type Limits struct { EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"` IngestionTenantShardSize int `yaml:"ingestion_tenant_shard_size" json:"ingestion_tenant_shard_size"` MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty" json:"metric_relabel_configs,omitempty" doc:"nocli|description=List of metric relabel configurations. Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. remote_write.write_relabel_configs."` + MaxExemplars int `yaml:"max_exemplars" json:"max_exemplars"` // Ingester enforced limits. // Series @@ -63,11 +64,14 @@ type Limits struct { MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"` MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"` MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"` + // Metadata MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"` MaxLocalMetadataPerMetric int `yaml:"max_metadata_per_metric" json:"max_metadata_per_metric"` MaxGlobalMetricsWithMetadataPerUser int `yaml:"max_global_metadata_per_user" json:"max_global_metadata_per_user"` MaxGlobalMetadataPerMetric int `yaml:"max_global_metadata_per_metric" json:"max_global_metadata_per_metric"` + // Out-of-order + OutOfOrderTimeWindow model.Duration `yaml:"out_of_order_time_window" json:"out_of_order_time_window"` // Querier enforced limits. MaxChunksPerQuery int `yaml:"max_fetched_chunks_per_query" json:"max_fetched_chunks_per_query"` @@ -81,6 +85,9 @@ type Limits struct { MaxQueriersPerTenant int `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size" doc:"hidden"` + // Query Frontend / Scheduler enforced limits. + MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant" json:"max_outstanding_requests_per_tenant"` + // Ruler defaults and limits. RulerEvaluationDelay model.Duration `yaml:"ruler_evaluation_delay_duration" json:"ruler_evaluation_delay_duration"` RulerTenantShardSize int `yaml:"ruler_tenant_shard_size" json:"ruler_tenant_shard_size"` @@ -144,6 +151,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 
0 to disable.") f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.") + f.IntVar(&l.MaxExemplars, "ingester.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.") + f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "[Experimental] Configures the allowed time window for ingestion of out-of-order samples. Disabled (0s) by default.") f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.") f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.") @@ -151,7 +160,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 2000000, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") f.IntVar(&l.MaxFetchedSeriesPerQuery, "querier.max-fetched-series-per-query", 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable") - f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "Deprecated (user max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") + f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "Deprecated (use max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") f.IntVar(&l.MaxFetchedDataBytesPerQuery, "querier.max-fetched-data-bytes-per-query", 0, "The maximum combined size of all data that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler for `query`, `query_range` and `series` APIs. 0 to disable.") f.Var(&l.MaxQueryLength, "store.max-query-length", "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query) and in the querier (on the query possibly split by the query-frontend). 0 to disable.") f.Var(&l.MaxQueryLookback, "querier.max-query-lookback", "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. 
If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.") @@ -161,6 +170,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") f.IntVar(&l.QueryVerticalShardSize, "frontend.query-vertical-shard-size", 0, "[Experimental] Number of shards to use when distributing shardable PromQL queries.") + f.IntVar(&l.MaxOutstandingPerTenant, "frontend.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per request queue (either query frontend or query scheduler); requests beyond this error with HTTP 429.") + f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.") @@ -381,6 +392,11 @@ func (o *Overrides) MaxGlobalSeriesPerUser(userID string) int { return o.GetOverridesForUser(userID).MaxGlobalSeriesPerUser } +// OutOfOrderTimeWindow returns the allowed time window for ingestion of out-of-order samples. +func (o *Overrides) OutOfOrderTimeWindow(userID string) model.Duration { + return o.GetOverridesForUser(userID).OutOfOrderTimeWindow +} + // MaxGlobalSeriesPerMetric returns the maximum number of series allowed per metric across the cluster. func (o *Overrides) MaxGlobalSeriesPerMetric(userID string) int { return o.GetOverridesForUser(userID).MaxGlobalSeriesPerMetric @@ -446,6 +462,12 @@ func (o *Overrides) MaxQueryParallelism(userID string) int { return o.GetOverridesForUser(userID).MaxQueryParallelism } +// MaxOutstandingPerTenant returns the limit to the maximum number +// of outstanding requests per tenant per request queue. +func (o *Overrides) MaxOutstandingPerTenant(userID string) int { + return o.GetOverridesForUser(userID).MaxOutstandingPerTenant +} + // EnforceMetricName whether to enforce the presence of a metric name. func (o *Overrides) EnforceMetricName(userID string) bool { return o.GetOverridesForUser(userID).EnforceMetricName @@ -553,6 +575,11 @@ func (o *Overrides) AlertmanagerReceiversBlockPrivateAddresses(user string) bool return o.GetOverridesForUser(user).AlertmanagerReceiversBlockPrivateAddresses } +// MaxExemplars gets the maximum number of exemplars that will be stored per user. 0 or less means disabled. +func (o *Overrides) MaxExemplars(userID string) int { + return o.GetOverridesForUser(userID).MaxExemplars +} + // Notification limits are special. Limits are returned in following order: // 1. per-tenant limits for given integration // 2. 
default limits for given integration diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE deleted file mode 100644 index 22080f736..000000000 --- a/vendor/github.com/dgryski/go-rendezvous/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017-2020 Damian Gryski - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go deleted file mode 100644 index 7a6f8203c..000000000 --- a/vendor/github.com/dgryski/go-rendezvous/rdv.go +++ /dev/null @@ -1,79 +0,0 @@ -package rendezvous - -type Rendezvous struct { - nodes map[string]int - nstr []string - nhash []uint64 - hash Hasher -} - -type Hasher func(s string) uint64 - -func New(nodes []string, hash Hasher) *Rendezvous { - r := &Rendezvous{ - nodes: make(map[string]int, len(nodes)), - nstr: make([]string, len(nodes)), - nhash: make([]uint64, len(nodes)), - hash: hash, - } - - for i, n := range nodes { - r.nodes[n] = i - r.nstr[i] = n - r.nhash[i] = hash(n) - } - - return r -} - -func (r *Rendezvous) Lookup(k string) string { - // short-circuit if we're empty - if len(r.nodes) == 0 { - return "" - } - - khash := r.hash(k) - - var midx int - var mhash = xorshiftMult64(khash ^ r.nhash[0]) - - for i, nhash := range r.nhash[1:] { - if h := xorshiftMult64(khash ^ nhash); h > mhash { - midx = i + 1 - mhash = h - } - } - - return r.nstr[midx] -} - -func (r *Rendezvous) Add(node string) { - r.nodes[node] = len(r.nstr) - r.nstr = append(r.nstr, node) - r.nhash = append(r.nhash, r.hash(node)) -} - -func (r *Rendezvous) Remove(node string) { - // find index of node to remove - nidx := r.nodes[node] - - // remove from the slices - l := len(r.nstr) - r.nstr[nidx] = r.nstr[l] - r.nstr = r.nstr[:l] - - r.nhash[nidx] = r.nhash[l] - r.nhash = r.nhash[:l] - - // update the map - delete(r.nodes, node) - moved := r.nstr[nidx] - r.nodes[moved] = nidx -} - -func xorshiftMult64(x uint64) uint64 { - x ^= x >> 12 // a - x ^= x << 25 // b - x ^= x >> 27 // c - return x * 2685821657736338717 -} diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml index ba95cdd15..ac12e485a 100644 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -1,12 +1,12 @@ sudo: false language: go +go_import_path: github.com/dustin/go-humanize go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x + - 1.13.x + - 1.14.x + 
- 1.15.x + - 1.16.x + - stable - master matrix: allow_failures: @@ -15,7 +15,7 @@ matrix: install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . + - go vet . + - go install -v -race ./... - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown index 91b4ae564..7d0b16b34 100644 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -5,7 +5,7 @@ Just a few functions for helping humanize times and sizes. `go get` it as `github.com/dustin/go-humanize`, import it as `"github.com/dustin/go-humanize"`, use it as `humanize`. -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for complete documentation. ## Sizes diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go index 1a2bf6172..3b015fd59 100644 --- a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -28,6 +28,10 @@ var ( BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) // BigYiByte is 1,024 z bytes in bit.Ints BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) + // BigRiByte is 1,024 y bytes in bit.Ints + BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp) + // BigQiByte is 1,024 r bytes in bit.Ints + BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp) ) var ( @@ -51,6 +55,10 @@ var ( BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) // BigYByte is 1,000 SI z bytes in big.Ints BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) + // BigRByte is 1,000 SI y bytes in big.Ints + BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp) + // BigQByte is 1,000 SI r bytes in big.Ints + BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp) ) var bigBytesSizeTable = map[string]*big.Int{ @@ -71,6 +79,10 @@ var bigBytesSizeTable = map[string]*big.Int{ "zb": BigZByte, "yib": BigYiByte, "yb": BigYByte, + "rib": BigRiByte, + "rb": BigRByte, + "qib": BigQiByte, + "qb": BigQByte, // Without suffix "": BigByte, "ki": BigKiByte, @@ -89,6 +101,10 @@ var bigBytesSizeTable = map[string]*big.Int{ "zi": BigZiByte, "y": BigYByte, "yi": BigYiByte, + "r": BigRByte, + "ri": BigRiByte, + "q": BigQByte, + "qi": BigQiByte, } var ten = big.NewInt(10) @@ -115,7 +131,7 @@ func humanateBigBytes(s, base *big.Int, sizes []string) string { // // BigBytes(82854982) -> 83 MB func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"} return humanateBigBytes(s, bigSIExp, sizes) } @@ -125,7 +141,7 @@ func BigBytes(s *big.Int) string { // // BigIBytes(82854982) -> 79 MiB func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"} return humanateBigBytes(s, bigIECExp, sizes) } diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go index 620690dec..2bc83a03c 100644 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -1,3 +1,4 @@ +//go:build go1.6 // +build go1.6 package humanize diff --git 
a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go index 1c62b640d..bce923f37 100644 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -6,6 +6,9 @@ import ( ) func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } offset := len(s) - 1 for offset > 0 { if s[offset] == '.' { diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go index dec618659..6470d0d47 100644 --- a/vendor/github.com/dustin/go-humanize/number.go +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -73,7 +73,7 @@ func FormatFloat(format string, n float64) string { if n > math.MaxFloat64 { return "Infinity" } - if n < -math.MaxFloat64 { + if n < (0.0 - math.MaxFloat64) { return "-Infinity" } diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go index ae659e0e4..8b8501984 100644 --- a/vendor/github.com/dustin/go-humanize/si.go +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -8,6 +8,8 @@ import ( ) var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto -24: "y", // yocto -21: "z", // zepto -18: "a", // atto @@ -25,6 +27,8 @@ var siPrefixTable = map[float64]string{ 18: "E", // exa 21: "Z", // zetta 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta } var revSIPrefixTable = revfmap(siPrefixTable) diff --git a/vendor/github.com/efficientgo/core/COPYRIGHT b/vendor/github.com/efficientgo/core/COPYRIGHT new file mode 100644 index 000000000..0b803fd34 --- /dev/null +++ b/vendor/github.com/efficientgo/core/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The EfficientGo Authors. +Licensed under the Apache License 2.0. \ No newline at end of file diff --git a/vendor/github.com/efficientgo/tools/core/LICENSE b/vendor/github.com/efficientgo/core/LICENSE similarity index 100% rename from vendor/github.com/efficientgo/tools/core/LICENSE rename to vendor/github.com/efficientgo/core/LICENSE diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go b/vendor/github.com/efficientgo/core/errcapture/do.go similarity index 89% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go rename to vendor/github.com/efficientgo/core/errcapture/do.go index c7176a475..40f9eb9d7 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go +++ b/vendor/github.com/efficientgo/core/errcapture/do.go @@ -10,11 +10,10 @@ package errcapture import ( "io" - "io/ioutil" "os" - "github.com/efficientgo/tools/core/pkg/merrors" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/merrors" ) type doFunc func() error @@ -38,7 +37,7 @@ func Do(err *error, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with error capture but exhausts the reader before. func ExhaustClose(err *error, r io.ReadCloser, format string, a ...interface{}) { - _, copyErr := io.Copy(ioutil.Discard, r) + _, copyErr := io.Copy(io.Discard, r) Do(err, r.Close, format, a...) 
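(Editor's note) For context on the errcapture hunk above: Do is meant to sit in a defer so that a Close error is folded into the function's named return value instead of being dropped. A minimal sketch under that assumption; writeAll is an illustrative helper, not part of the patch:

	package main

	import (
		"os"

		"github.com/efficientgo/core/errcapture"
	)

	// writeAll creates path and writes data; any error from f.Close is
	// captured into the named return err rather than silently lost.
	func writeAll(path string, data []byte) (err error) {
		f, err := os.Create(path)
		if err != nil {
			return err
		}
		defer errcapture.Do(&err, f.Close, "close %s", path)

		_, err = f.Write(data)
		return err
	}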
diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go b/vendor/github.com/efficientgo/core/errcapture/doc.go similarity index 71% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go rename to vendor/github.com/efficientgo/core/errcapture/doc.go index 96c1c2291..ab7f2ed82 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go +++ b/vendor/github.com/efficientgo/core/errcapture/doc.go @@ -1,29 +1,43 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package errcapture - +// Package errcapture implements robust error handling in defer statements using provided return argument. +// // Close a `io.Closer` interface or execute any function that returns error safely while capturing error. // It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `errcapture` provides utility functions to capture error and add to provided one, // still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer errcapture.Do(&err, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer errcapture.Do(&err, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `errcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File) (err error) { +// defer errcapture.Do(&err, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The errcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/logerrcapture if you want to just log an error instead. +package errcapture diff --git a/vendor/github.com/efficientgo/core/errors/doc.go b/vendor/github.com/efficientgo/core/errors/doc.go new file mode 100644 index 000000000..c489b2e2f --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/doc.go @@ -0,0 +1,13 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package errors provides basic utilities to manipulate errors with a useful stacktrace. It combines the +// benefits of errors.New and fmt.Errorf world into a single package. It is an improved and minimal version of the popular +// https://github.com/pkg/errors (archived) package allowing reliable wrapping of errors with stacktrace. Unfortunately, +// the standard library recommended error wrapping using `%+w` is prone to errors and does not support stacktraces. 
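// [Editorial note, not part of the vendored file] A short usage sketch for
// this package; readConfig and run are hypothetical fallible helpers:
//
//	cfg, err := readConfig(path)
//	if err != nil {
//		return errors.Wrap(err, "read config")
//	}
//
// At the top of the program, printing with the %+v verb dumps the message
// chain together with the captured stack frames:
//
//	if err := run(); err != nil {
//		fmt.Printf("%+v\n", err)
//		os.Exit(1)
//	}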
+package errors diff --git a/vendor/github.com/efficientgo/core/errors/errors.go b/vendor/github.com/efficientgo/core/errors/errors.go new file mode 100644 index 000000000..ab032cfe1 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/errors.go @@ -0,0 +1,168 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "errors" + "fmt" + "strings" +) + +// base is the fundamental struct that implements the error interface and the acts as the backbone of this errors package. +type base struct { + // info contains the error message passed through calls like errors.Wrap, errors.New. + info string + // stacktrace stores information about the program counters - i.e. where this error was generated. + stack stacktrace + // err is the actual error which is being wrapped with a stacktrace and message information. + err error +} + +// Error implements the error interface. +func (b *base) Error() string { + if b.err != nil { + return fmt.Sprintf("%s: %s", b.info, b.err.Error()) + } + return b.info +} + +// Unwrap implements the error Unwrap interface. +func (b *base) Unwrap() error { + return b.err +} + +// Format implements the fmt.Formatter interface to support the formatting of an error chain with "%+v" verb. +// Whenever error is printed with %+v format verb, stacktrace info gets dumped to the output. +func (b *base) Format(s fmt.State, verb rune) { + if verb == 'v' && s.Flag('+') { + s.Write([]byte(formatErrorChain(b))) + return + } + + s.Write([]byte(b.Error())) +} + +// New returns a new error with a stacktrace of recent call frames. Each call to New +// returns a distinct error value even if the text is identical. An alternative of +// the errors.New function from github.com/pkg/errors. +func New(message string) error { + return &base{ + info: message, + stack: newStackTrace(), + err: nil, + } +} + +// Newf is like New, but it formats input according to a format specifier. +// An alternative of the fmt.Errorf function. +// +// If no args have been passed, it is same as `New` function without formatting. Character like +// '%' still has to be escaped in that scenario. +func Newf(format string, args ...interface{}) error { + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: nil, + } +} + +// Wrap returns a new error, which wraps another error with a stacktrace containing recent call frames. +// +// If cause is nil, it does not produce the error, similar to errors.Wrap from github.com/pkg/errors was doing. +// Still, avoid `errors.Wrap(nil, "...") patterns as it can lead to inefficiencies while constructing arguments +// to format as well readability issues. We keep this behaviour to make sure it's a drop-in replacement. +func Wrap(cause error, message string) error { + if cause == nil { + return nil + } + + return &base{ + info: message, + stack: newStackTrace(), + err: cause, + } +} + +// Wrapf is like Wrap but the message is formatted with the supplied format specifier. +// +// If no args have been passed, it is same as `Wrap` function without formatting. +// Character like '%' still has to be escaped in that scenario. 
+func Wrapf(cause error, format string, args ...interface{}) error { + if cause == nil { + return nil + } + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: cause, + } +} + +// Cause returns the result of repeatedly calling the Unwrap method on err, if err's +// type implements an Unwrap method. Otherwise, Cause returns the last encountered error. +// The difference between Unwrap and Cause is the first one performs unwrapping of one level +// but Cause returns the last err (whether it's nil or not) where it failed to assert +// the interface containing the Unwrap method. +// This is a replacement of errors.Cause without the causer interface from pkg/errors which +// actually can be sufficed through the errors.Is function. But considering some use cases +// where we need to peel off all the external layers applied through errors.Wrap family, +// it is useful ( where external SDK doesn't use errors.Is internally). +func Cause(err error) error { + for err != nil { + e, ok := err.(interface { + Unwrap() error + }) + if !ok { + return err + } + err = e.Unwrap() + } + return nil +} + +// formatErrorChain formats an error chain. +func formatErrorChain(err error) string { + var buf strings.Builder + for err != nil { + if e, ok := err.(*base); ok { + buf.WriteString(fmt.Sprintf("%s\n%v", e.info, e.stack)) + err = e.err + } else { + buf.WriteString(fmt.Sprintf("%s\n", err.Error())) + err = nil + } + } + return buf.String() +} + +// The functions `Is`, `As` & `Unwrap` provides a thin wrapper around the builtin errors +// package in go. Just for sake of completeness and correct autocompletion behaviors from +// IDEs they have been wrapped using functions instead of using variable to reference them +// as first class functions (eg: var Is = errros.Is ). + +// Is is a wrapper of built-in errors.Is. It reports whether any error in err's +// chain matches target. The chain consists of err itself followed by the sequence +// of errors obtained by repeatedly calling Unwrap. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As is a wrapper of built-in errors.As. It finds the first error in err's +// chain that matches target, and if one is found, sets target to that error +// value and returns true. Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} + +// Unwrap is a wrapper of built-in errors.Unwrap. Unwrap returns the result of +// calling the Unwrap method on err, if err's type contains an Unwrap method +// returning error. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} diff --git a/vendor/github.com/efficientgo/core/errors/stacktrace.go b/vendor/github.com/efficientgo/core/errors/stacktrace.go new file mode 100644 index 000000000..a479977d8 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/stacktrace.go @@ -0,0 +1,58 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "fmt" + "runtime" + "strings" +) + +// stacktrace holds a snapshot of program counters. +type stacktrace []uintptr + +// newStackTrace captures a stack trace. It skips first 3 frames to record the +// snapshot of the stack trace at the origin of a particular error. It tries to +// record maximum 16 frames (if available). 
+func newStackTrace() stacktrace { + const stackDepth = 16 // record maximum 16 frames (if available). + + pc := make([]uintptr, stackDepth) + // using skip=3 for not to count the program counter address of + // 1. the respective function from errors package (eg. errors.New) + // 2. newStacktrace itself + // 3. the function used in runtime.Callers + n := runtime.Callers(3, pc) + + // this approach is taken to reduce long term memory footprint (obtained through escape analysis). + // We are returning a new slice by re-slicing the pc with the required length and capacity (when the + // no of returned callFrames is less that stackDepth). This uses less memory compared to pc[:n] as + // the capacity of new slice is inherited from the parent slice if not specified. + return pc[:n:n] +} + +// String implements the fmt.Stringer interface to provide formatted text output. +func (s stacktrace) String() string { + var buf strings.Builder + + // CallersFrames takes the slice of Program Counter addresses returned by Callers to + // retrieve function/file/line information. + cf := runtime.CallersFrames(s) + for { + // more indicates if the next call will be successful or not. + frame, more := cf.Next() + // used formatting scheme <`>`space><:> for example: + // > testing.tRunner /home/go/go1.17.8/src/testing/testing.go:1259 + buf.WriteString(fmt.Sprintf("> %s\t%s:%d\n", frame.Func.Name(), frame.File, frame.Line)) + if !more { + break + } + } + return buf.String() +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go b/vendor/github.com/efficientgo/core/logerrcapture/do.go similarity index 94% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go rename to vendor/github.com/efficientgo/core/logerrcapture/do.go index 41662b901..6bad9fa6e 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/do.go @@ -11,10 +11,9 @@ package logerrcapture import ( "fmt" "io" - "io/ioutil" "os" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" ) // Logger interface compatible with go-kit/logger. @@ -42,7 +41,7 @@ func Do(logger Logger, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with a log message on error but exhausts the reader before. func ExhaustClose(logger Logger, r io.ReadCloser, format string, a ...interface{}) { - _, err := io.Copy(ioutil.Discard, r) + _, err := io.Copy(io.Discard, r) if err != nil { _ = logger.Log("msg", "failed to exhaust reader, performance may be impeded", "err", err) } diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go b/vendor/github.com/efficientgo/core/logerrcapture/doc.go similarity index 66% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go rename to vendor/github.com/efficientgo/core/logerrcapture/doc.go index 414efc995..90738562c 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/doc.go @@ -1,30 +1,44 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package logerrcapture - -// Close a `io.Closer` interface or execute any function that returns error safely while logging error. +// Package logerrcapture implements robust error handling in defer statements using provided logger. +// +// The Close a `io.Closer` interface or execute any function that returns error safely while logging error. 
// It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `logerrcapture` provides utility functions to capture error and log it via provided // logger, while still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer logerrcapture.Do(logger, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer logerrcapture.Do(logger, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `logerrcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File, logger logerrcapture.Logger) error { +// defer logerrcapture.Do(logger, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The logerrcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Recommended: Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/errcapture if you want to return error instead of just logging (causing // hard error). +package logerrcapture diff --git a/vendor/github.com/efficientgo/core/merrors/doc.go b/vendor/github.com/efficientgo/core/merrors/doc.go new file mode 100644 index 000000000..a670493f4 --- /dev/null +++ b/vendor/github.com/efficientgo/core/merrors/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package merrors implements multi error implementation that chains errors on the same level. +// Supports errors.As and errors.Is functions. +// +// Example 1: +// +// return merrors.New(err1, err2).Err() +// +// Example 2: +// +// merr := merrors.New(err1) +// merr.Add(err2, errOrNil3) +// for _, err := range errs { +// merr.Add(err) +// } +// return merr.Err() +// +// Example 3: +// +// func CloseAll(closers []io.Closer) error { +// errs := merrors.New() +// for _ , c := range closers { +// errs.Add(c.Close()) +// } +// return errs.Err() +// } +package merrors diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go b/vendor/github.com/efficientgo/core/merrors/merrors.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go rename to vendor/github.com/efficientgo/core/merrors/merrors.go diff --git a/vendor/github.com/efficientgo/core/testutil/doc.go b/vendor/github.com/efficientgo/core/testutil/doc.go new file mode 100644 index 000000000..ac4ca1df5 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package testutil is a minimal testing utility with only few functions like `Assert`, `Ok`, `NotOk` for errors and `Equals`. +// It also provides TB ("TestOrBench") utility for union of testing and benchmarks. 
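// [Editorial note, not part of the vendored file] A sketch of the assertion
// helpers defined later in this patch (testutil.Ok and testutil.Equals);
// ParseBytes is a hypothetical function under test:
//
//	func TestParseBytes(t *testing.T) {
//		got, err := ParseBytes("1KiB")
//		testutil.Ok(t, err)
//		testutil.Equals(t, uint64(1024), got)
//	}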
+package testutil diff --git a/vendor/github.com/efficientgo/core/testutil/internal/difflib.go b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go new file mode 100644 index 000000000..aea79d215 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go @@ -0,0 +1,645 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no loger maintained. + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
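// [Editorial note, not part of the vendored file] For example, comparing two
// small line slices:
//
//	m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "x", "c"})
//	for _, blk := range m.GetMatchingBlocks() {
//		fmt.Println(blk.A, blk.B, blk.Size)
//	}
//
// which should print 0 0 1, then 2 2 1, then the 3 3 0 sentinel.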
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
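// [Editorial note, not part of the vendored file] A minimal sketch using the
// helpers from this file; SplitLines keeps the trailing newlines that the
// diff control lines rely on:
//
//	diff := UnifiedDiff{
//		A:        SplitLines("foo\nbar\n"),
//		B:        SplitLines("foo\nbaz\n"),
//		FromFile: "a.txt",
//		ToFile:   "b.txt",
//		Context:  3,
//	}
//	text, err := GetUnifiedDiffString(diff)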
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go b/vendor/github.com/efficientgo/core/testutil/testorbench.go
similarity index 100%
rename from vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go
rename to vendor/github.com/efficientgo/core/testutil/testorbench.go
diff --git a/vendor/github.com/efficientgo/core/testutil/testutil.go b/vendor/github.com/efficientgo/core/testutil/testutil.go
new file mode 100644
index 000000000..c88191bc1
--- /dev/null
+++ b/vendor/github.com/efficientgo/core/testutil/testutil.go
@@ -0,0 +1,233 @@
+// Copyright (c) The EfficientGo Authors.
+// Licensed under the Apache License 2.0.
+
+package testutil
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"runtime/debug"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/efficientgo/core/errors"
+	"github.com/efficientgo/core/testutil/internal"
+	"github.com/google/go-cmp/cmp"
+)
+
+const limitOfElemChars = 1e3
+
+func withLimitf(f string, v ...interface{}) string {
+	s := fmt.Sprintf(f, v...)
+	if len(s) > limitOfElemChars {
+		return s[:limitOfElemChars] + "...(output trimmed)"
+	}
+	return s
+}
+
+// Assert fails the test if the condition is false.
+func Assert(tb testing.TB, condition bool, v ...interface{}) { + tb.Helper() + if condition { + return + } + _, file, line, _ := runtime.Caller(1) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf("\033[31m%s:%d: \"%s\"\033[39m\n\n", filepath.Base(file), line, withLimitf(msg)) +} + +// Ok fails the test if an err is not nil. +func Ok(tb testing.TB, err error, v ...interface{}) { + tb.Helper() + if err == nil { + return + } + _, file, line, _ := runtime.Caller(1) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf("\033[31m%s:%d: \"%s\"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, withLimitf(msg), withLimitf(err.Error())) +} + +// NotOk fails the test if an err is nil. +func NotOk(tb testing.TB, err error, v ...interface{}) { + tb.Helper() + if err != nil { + return + } + _, file, line, _ := runtime.Caller(1) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf("\033[31m%s:%d: \"%s\"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line, withLimitf(msg)) +} + +// Equals fails the test if exp is not equal to act. +func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { + tb.Helper() + if reflect.DeepEqual(exp, act) { + return + } + fatalNotEqual(tb, exp, act, v...) +} + +func fatalNotEqual(tb testing.TB, exp, act interface{}, v ...interface{}) { + _, file, line, _ := runtime.Caller(2) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf( + "\033[31m%s:%d: \"%s\"\n\n\texp: %s\n\n\tgot: %s%s\033[39m\n\n", + filepath.Base(file), line, withLimitf(msg), withLimitf("%#v", exp), withLimitf("%#v", act), withLimitf(diff(exp, act)), + ) +} + +type goCmp struct { + opts cmp.Options +} + +// WithGoCmp allows specifying options and using https://github.com/google/go-cmp +// for equality comparisons. The compatibility guarantee of this function's arguments +// are the same as go-cmp (no guarantee due to v0.x). +func WithGoCmp(opts ...cmp.Option) goCmp { + return goCmp{opts: opts} +} + +// Equals uses go-cmp for comparing equality between two structs, and can be used with +// various options defined in go-cmp/cmp and go-cmp/cmp/cmpopts. +func (o goCmp) Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { + tb.Helper() + if cmp.Equal(exp, act, o.opts) { + return + } + fatalNotEqual(tb, exp, act, v...) +} + +// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. +func FaultOrPanicToErr(f func()) (err error) { + // Set this go routine to panic on segfault to allow asserting on those. + debug.SetPanicOnFault(true) + defer func() { + if r := recover(); r != nil { + err = errors.Newf("invoked function panicked or caused segmentation fault: %v", r) + } + debug.SetPanicOnFault(false) + }() + + f() + + return err +} + +// ContainsStringSlice fails the test if needle is not contained within haystack, if haystack or needle is +// an empty slice, or if needle is longer than haystack. 
+func ContainsStringSlice(tb testing.TB, haystack, needle []string) { + _, file, line, _ := runtime.Caller(1) + + if !contains(haystack, needle) { + tb.Fatalf("\033[31m%s:%d: %s does not contain %s\033[39m\n\n", filepath.Base(file), line, withLimitf("%#v", haystack), withLimitf("%#v", needle)) + } +} + +func contains(haystack, needle []string) bool { + if len(haystack) == 0 || len(needle) == 0 { + return false + } + + if len(haystack) < len(needle) { + return false + } + + for i := 0; i < len(haystack); i++ { + outer := i + + for j := 0; j < len(needle); j++ { + // End of the haystack but not the end of the needle, end + if outer == len(haystack) { + return false + } + + // No match, try the next index of the haystack + if haystack[outer] != needle[j] { + break + } + + // End of the needle and it still matches, end + if j == len(needle)-1 { + return true + } + + // This element matches between the two slices, try the next one + outer++ + } + } + + return false +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice, array or string. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + c := spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + } + if et != reflect.TypeOf("") { + e = c.Sdump(expected) + a = c.Sdump(actual) + } else { + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() + } + + diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ + A: internal.SplitLines(e), + B: internal.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + return "\n\nDiff:\n" + diff +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go deleted file mode 100644 index d1eaf8333..000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package merrors - -// Safe multi error implementation that chains errors on the same level. Supports errors.As and errors.Is functions. -// -// Example 1: -// -// return merrors.New(err1, err2).Err() -// -// Example 2: -// -// merr := merrors.New(err1) -// merr.Add(err2, errOrNil3) -// for _, err := range errs { -// merr.Add(err) -// } -// return merr.Err() -// diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go deleted file mode 100644 index dc9f63f1b..000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package testutil - -// Simplistic assertion helpers for testing code. TestOrBench utils for union of testing and benchmarks. 
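The similarity helpers in the difflib port above form a cheap-to-expensive cascade: `RealQuickRatio()` bounds the result using only the sequence lengths, `QuickRatio()` using element multisets, and `Ratio()` computes the exact `2.0*M / T` measure over matching blocks. A minimal sketch of using them as successive filters; since the copy above is vendored as an internal package, this assumes the same code under its original import path `github.com/pmezard/go-difflib/difflib` (the path imported by the old testutil deleted below), and `similarEnough` and its cutoff are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

// similarEnough short-circuits on the cheap upper bounds before paying for
// the exact Ratio() computation.
func similarEnough(a, b []string, cutoff float64) bool {
	m := difflib.NewMatcher(a, b)
	if m.RealQuickRatio() < cutoff { // bound from lengths only
		return false
	}
	if m.QuickRatio() < cutoff { // bound from element multisets
		return false
	}
	return m.Ratio() >= cutoff // exact 2.0*M/T over matching blocks
}

func main() {
	a := difflib.SplitLines("one\ntwo\nthree\n")
	b := difflib.SplitLines("one\ntwo\nfour\n")
	fmt.Println(similarEnough(a, b, 0.5)) // true: M=3, T=8, ratio 0.75
}
```

Each bound only ever over-estimates, so a filter like this never rejects a pair that the exact `Ratio()` would accept.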
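And a sketch of the new assertion helpers in use; the import path matches the vendored location above, while the test itself is hypothetical:

```go
package example_test

import (
	"strings"
	"testing"

	"github.com/efficientgo/core/testutil"
)

func TestStringHandling(t *testing.T) {
	got := strings.ToUpper("diff")

	// Optional trailing args are a printf-style message shown on failure.
	testutil.Assert(t, got != "", "expected non-empty result, got %q", got)
	testutil.Ok(t, nil) // a non-nil error would fail with file:line context
	// On mismatch, Equals prints exp/got (trimmed to limitOfElemChars) plus
	// a unified diff rendered by the internal difflib port above.
	testutil.Equals(t, "DIFF", got, "unexpected casing")
}
```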
diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go b/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go deleted file mode 100644 index 8a46d2e68..000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package testutil - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "runtime/debug" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" - "github.com/pmezard/go-difflib/difflib" - "go.uber.org/goleak" -) - -// Assert fails the test if the condition is false. -func Assert(tb testing.TB, condition bool, v ...interface{}) { - tb.Helper() - if condition { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d: "+msg+"\033[39m\n\n", filepath.Base(file), line) -} - -// Ok fails the test if an err is not nil. -func Ok(tb testing.TB, err error, v ...interface{}) { - tb.Helper() - if err == nil { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) -} - -// NotOk fails the test if an err is nil. -func NotOk(tb testing.TB, err error, v ...interface{}) { - tb.Helper() - if err != nil { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line) -} - -// Equals fails the test if exp is not equal to act. -func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { - tb.Helper() - if reflect.DeepEqual(exp, act) { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatal(sprintfWithLimit("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))) -} - -func sprintfWithLimit(act string, v ...interface{}) string { - s := fmt.Sprintf(act, v...) - if len(s) > 10000 { - return s[:10000] + "...(output trimmed)" - } - return s -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
-func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - return "\n\nDiff:\n" + diff -} - -// TolerantVerifyLeakMain verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeakMain(m *testing.M) { - goleak.VerifyTestMain(m, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// TolerantVerifyLeak verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeak(t *testing.T) { - goleak.VerifyNone(t, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. -func FaultOrPanicToErr(f func()) (err error) { - // Set this go routine to panic on segfault to allow asserting on those. - debug.SetPanicOnFault(true) - defer func() { - if r := recover(); r != nil { - err = errors.Errorf("invoked function panicked or caused segmentation fault: %v", r) - } - debug.SetPanicOnFault(false) - }() - - f() - - return err -} diff --git a/vendor/github.com/efficientgo/tools/extkingpin/LICENSE b/vendor/github.com/efficientgo/tools/extkingpin/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/efficientgo/tools/extkingpin/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
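The extkingpin package vendored in the hunks below pairs a `<name>` flag with a `<name>-file` flag and resolves both through a single `PathOrContent` value (see the doc.go and pathorcontent.go diffs that follow). A hypothetical wiring under that API; the app name and flag name are invented for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/efficientgo/tools/extkingpin"
	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example", "PathOrContent demo")

	// Registers both --objstore.config and --objstore.config-file
	// (mutually exclusive), with $(VAR) substitution enabled.
	cfg := extkingpin.RegisterPathOrContent(app, "objstore.config",
		"YAML configuring the object store.",
		extkingpin.WithEnvSubstitution())

	if _, err := app.Parse(os.Args[1:]); err != nil {
		app.Fatalf("%v", err)
	}

	content, err := cfg.Content() // errors if both flags were set
	if err != nil {
		app.Fatalf("%v", err)
	}
	fmt.Printf("loaded %d bytes of configuration\n", len(content))
}
```

Passing both `--objstore.config` and `--objstore.config-file` makes `Content()` return the mutual-exclusion error defined in the hunk below.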
diff --git a/vendor/github.com/efficientgo/tools/extkingpin/doc.go b/vendor/github.com/efficientgo/tools/extkingpin/doc.go new file mode 100644 index 000000000..f07082b7a --- /dev/null +++ b/vendor/github.com/efficientgo/tools/extkingpin/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +package extkingpin + +// PathOrContent is a flag type that defines two flags to fetch bytes. Either from file (*-file flag) or content (* flag). +// Also returns content of YAML file with substituted environment variables. +// Follows K8s convention, i.e $(...), as mentioned here https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/. + +// RegisterPathOrContent registers PathOrContent flag in kingpinCmdClause. + +// Content returns the content of the file when given or directly the content that has been passed to the flag. +// It returns an error when: +// * The file and content flags are both not empty. +// * The file flag is not empty but the file can't be read. +// * The content is empty and the flag has been defined as required. + +// Option is a functional option type for PathOrContent objects. +// WithRequired allows you to override default required option. +// WithEnvSubstitution allows you to override default envSubstitution option. diff --git a/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go b/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go new file mode 100644 index 000000000..0a4b5ff17 --- /dev/null +++ b/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go @@ -0,0 +1,143 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Taken from Thanos project. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. +package extkingpin + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + + "github.com/pkg/errors" + "gopkg.in/alecthomas/kingpin.v2" +) + +// PathOrContent is a flag type that defines two flags to fetch bytes. Either from file (*-file flag) or content (* flag). +type PathOrContent struct { + flagName string + + envSubstitution bool + required bool + hidden bool + + path *string + content *string +} + +// Option is a functional option type for PathOrContent objects. +type Option func(*PathOrContent) + +type FlagClause interface { + Flag(name, help string) *kingpin.FlagClause +} + +// RegisterPathOrContent registers PathOrContent flag in kingpinCmdClause. +func RegisterPathOrContent(cmd FlagClause, flagName string, help string, opts ...Option) *PathOrContent { + fileFlagName := fmt.Sprintf("%s-file", flagName) + fileFlag := cmd.Flag(fileFlagName, fmt.Sprintf("Path to %s", help)).PlaceHolder("") + contentFlag := cmd.Flag(flagName, fmt.Sprintf("Alternative to '%s' flag (mutually exclusive). Content of %s", fileFlagName, help)).PlaceHolder("") + + p := &PathOrContent{ + flagName: flagName, + } + + for _, opt := range opts { + opt(p) + } + + if p.hidden { + fileFlag = fileFlag.Hidden() + contentFlag = contentFlag.Hidden() + } + + p.path = fileFlag.String() + p.content = contentFlag.String() + return p +} + +// Content returns the content of the file when given or directly the content that has been passed to the flag. +// It returns an error when: +// * The file and content flags are both not empty. +// * The file flag is not empty but the file can't be read. +// * The content is empty and the flag has been defined as required. 
+func (p *PathOrContent) Content() ([]byte, error) { + fileFlagName := fmt.Sprintf("%s-file", p.flagName) + + if len(*p.path) > 0 && len(*p.content) > 0 { + return nil, errors.Errorf("both %s and %s flags set.", fileFlagName, p.flagName) + } + + var content []byte + if len(*p.path) > 0 { + c, err := ioutil.ReadFile(*p.path) + if err != nil { + return nil, errors.Wrapf(err, "loading file %s for %s", *p.path, fileFlagName) + } + content = c + } else { + content = []byte(*p.content) + } + + if len(content) == 0 && p.required { + return nil, errors.Errorf("flag %s or %s is required for running this command and content cannot be empty.", fileFlagName, p.flagName) + } + if p.envSubstitution { + replace, err := expandEnv(content) + if err != nil { + return nil, err + } + content = replace + } + return content, nil +} + +// Path returns the file's path. +func (p *PathOrContent) Path() string { + return *p.path +} + +// WithRequired allows you to override default required option. +func WithRequired() Option { + return func(p *PathOrContent) { + p.required = true + } +} + +// WithEnvSubstitution allows you to override default envSubstitution option. +func WithEnvSubstitution() Option { + return func(p *PathOrContent) { + p.envSubstitution = true + } +} + +// WithHidden allows you to override the default option and keeps the flag hidden. +func WithHidden() Option { + return func(p *PathOrContent) { + p.hidden = true + } +} + +// expandEnv returns content of YAML file with substituted environment variables. +// Follows K8s convention, i.e $(...), as mentioned here https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/. +func expandEnv(b []byte) (r []byte, err error) { + var envRe = regexp.MustCompile(`\$\(([a-zA-Z_0-9]+)\)`) + r = envRe.ReplaceAllFunc(b, func(n []byte) []byte { + if err != nil { + return nil + } + n = n[2 : len(n)-1] + + v, ok := os.LookupEnv(string(n)) + if !ok { + err = errors.Errorf("found reference to unset environment variable %q", n) + return nil + } + return []byte(v) + }) + return r, err +} diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 5152bf59b..be82827ca 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -7,7 +7,6 @@ suits you. ![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) - ## Install ```bash @@ -124,17 +123,17 @@ fmt.Println("All text will now be bold magenta.") ``` ### Disable/Enable color - + There might be a case where you want to explicitly disable/enable color output. the `go-isatty` package will automatically disable color output for non-tty output streams (for example if the output were piped directly to `less`). The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment -variable is set (regardless of its value). +variable is set to a non-empty string. -`Color` has support to disable/enable colors programatically both globally and +`Color` has support to disable/enable colors programmatically both globally and for single color definitions. For example suppose you have a CLI app and a -`--no-color` bool flag. You can easily disable the color output with: +`-no-color` bool flag. 
You can easily disable the color output with: ```go var flagNoColor = flag.Bool("no-color", false, "Disable color output") @@ -167,11 +166,10 @@ To output color in GitHub Actions (or other CI systems that support ANSI colors) * Save/Return previous values * Evaluate fmt.Formatter interface - ## Credits - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) ## License diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 98a60f3c8..889f9e77b 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -19,10 +19,10 @@ var ( // set (regardless of its value). This is a global option and affects all // colors. For more control over each color block use the methods // DisableColor() individually. - NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - // Output defines the standard output of the print functions. By default + // Output defines the standard output of the print functions. By default, // os.Stdout is used. Output = colorable.NewColorableStdout() @@ -35,10 +35,9 @@ var ( colorsCacheMu sync.Mutex // protects colorsCache ) -// noColorExists returns true if the environment variable NO_COLOR exists. -func noColorExists() bool { - _, exists := os.LookupEnv("NO_COLOR") - return exists +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" } // Color defines a custom color object which is defined by SGR parameters. @@ -120,7 +119,7 @@ func New(value ...Attribute) *Color { params: make([]Attribute, 0), } - if noColorExists() { + if noColorIsSet() { c.noColor = boolPtr(true) } @@ -152,7 +151,7 @@ func (c *Color) Set() *Color { return c } - fmt.Fprintf(Output, c.format()) + fmt.Fprint(Output, c.format()) return c } @@ -164,16 +163,21 @@ func (c *Color) unset() { Unset() } -func (c *Color) setWriter(w io.Writer) *Color { +// SetWriter is used to set the SGR sequence with the given io.Writer. This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { if c.isNoColorSet() { return c } - fmt.Fprintf(w, c.format()) + fmt.Fprint(w, c.format()) return c } -func (c *Color) unsetWriter(w io.Writer) { +// UnsetWriter resets all escape attributes and clears the output with the give +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { if c.isNoColorSet() { return } @@ -192,20 +196,14 @@ func (c *Color) Add(value ...Attribute) *Color { return c } -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - // Fprint formats using the default formats for its operands and writes to w. // Spaces are added between operands when neither is a string. // It returns the number of bytes written and any write error encountered. // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. 
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprint(w, a...) } @@ -227,8 +225,8 @@ func (c *Color) Print(a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprintf(w, format, a...) } @@ -248,8 +246,8 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprintln(w, a...) } @@ -396,7 +394,7 @@ func (c *Color) DisableColor() { } // EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. +// DisableColor(). Otherwise, this method has no side effects. func (c *Color) EnableColor() { c.noColor = boolPtr(false) } diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index 04541de78..9491ad541 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -5,106 +5,105 @@ that suits you. Use simple and default helper functions with predefined foreground colors: - color.Cyan("Prints text in cyan.") + color.Cyan("Prints text in cyan.") - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") -However there are times where custom color mixes are required. Below are some +However, there are times when custom color mixes are required. Below are some examples to create custom color objects and use the print functions of each separate color object. - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") You can create PrintXxx functions to simplify even more: - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") You can also FprintXxx functions to pass your own io.Writer: - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") Or create SprintXxx functions to mix strings with other non-colorized strings: - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and +However, only for color.SprintXXX functions, user should use fmt.FprintXXX and set the output to color.Output: - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) Using with existing code is possible. Just use the Set() method to set the standard output to the given parameters. That way a rewrite of an existing code is not required. - // Use handy standard colors. - color.Set(color.FgYellow) + // Use handy standard colors. 
+ color.Set(color.FgYellow) - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") - color.Unset() // don't forget to unset + color.Unset() // don't forget to unset - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function - fmt.Println("All text will be now bold magenta.") + fmt.Println("All text will be now bold magenta.") There might be a case where you want to disable color output (for example to pipe the standard output of your app to somewhere else). `Color` has support to @@ -112,24 +111,24 @@ disable colors both globally and for single color definition. For example suppose you have a CLI app and a `--no-color` bool flag. You can easily disable the color output with: - var flagNoColor = flag.Bool("no-color", false, "Disable color output") + var flagNoColor = flag.Bool("no-color", false, "Disable color output") - if *flagNoColor { - color.NoColor = true // disables colorized output - } + if *flagNoColor { + color.NoColor = true // disables colorized output + } You can also disable the color by setting the NO_COLOR environment variable to any value. It also has support for single color definitions (local). You can disable/enable color output on the fly: - c := color.New(color.FgCyan) - c.Println("Prints cyan text") + c := color.New(color.FgCyan) + c.Println("Prints cyan text") - c.DisableColor() - c.Println("This is printed without any color") + c.DisableColor() + c.Println("This is printed without any color") - c.EnableColor() - c.Println("This prints again cyan...") + c.EnableColor() + c.Println("This prints again cyan...") */ package color diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf4..1d89d85ce 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,6 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5ef..000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f5..77f9593bd 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,6 +7,95 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. 
+
+[#233]: https://github.com/fsnotify/fsnotify/pull/233
+[#260]: https://github.com/fsnotify/fsnotify/pull/260
+[#288]: https://github.com/fsnotify/fsnotify/pull/288
+[#370]: https://github.com/fsnotify/fsnotify/pull/370
+[#434]: https://github.com/fsnotify/fsnotify/pull/434
+[#460]: https://github.com/fsnotify/fsnotify/pull/460
+[#463]: https://github.com/fsnotify/fsnotify/pull/463
+[#465]: https://github.com/fsnotify/fsnotify/pull/465
+[#470]: https://github.com/fsnotify/fsnotify/pull/470
+[#471]: https://github.com/fsnotify/fsnotify/pull/471
+[#475]: https://github.com/fsnotify/fsnotify/pull/475
+[#477]: https://github.com/fsnotify/fsnotify/pull/477
+[#479]: https://github.com/fsnotify/fsnotify/pull/479
+[#480]: https://github.com/fsnotify/fsnotify/pull/480
+[#485]: https://github.com/fsnotify/fsnotify/pull/485
+
 ## [1.5.4] - 2022-04-25
 
 * Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
@@ -40,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 [#385](https://github.com/fsnotify/fsnotify/pull/385)
 * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
 
+## [1.4.9] - 2020-03-11
+
+* Move example usage to the readme #329. This may resolve #328.
+
+## [1.4.8] - 2020-03-10
+
+* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
+* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
+* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
+* CI: Less verbosity (@nathany #267)
+* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
+* Tests: Check if channels are closed in the example (@alexeykazakov #244)
+* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
+* CI: Add windows to travis matrix (@cpuguy83 #284)
+* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
+* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
+* Linux: open files with close-on-exec (@linxiulei #273)
+* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
+* Project: Add go.mod (@nathany #309)
+* Project: Revise editor config (@nathany #309)
+* Project: Update copyright for 2019 (@nathany #309)
+* CI: Drop go1.8 from CI matrix (@nathany #309)
+* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
+
 ## [1.4.7] - 2018-01-09
 
 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index 8a642563d..ea379759d 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -1,60 +1,26 @@
-# Contributing
+Thank you for your interest in contributing to fsnotify! We try to review and
+merge PRs in a reasonable timeframe, but please be aware that:
 
-## Issues
+- To avoid "wasted" work, please discuss changes on the issue tracker first. You
+  can just send PRs, but they may end up being rejected for one reason or the
+  other.
 
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
-* Please indicate the platform you are using fsnotify on.
-* A code example to reproduce the problem is appreciated.
+- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb0..fb03ade75 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. 
All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef8..d4e6080fe 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,161 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. 
-[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** -Cross platform: Windows, Linux, BSD and macOS. +--- -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +Platform support: -\* Android and iOS are untested. +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +Linux and macOS should include Android and iOS, but these are currently untested. -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). 
-
-## Usage
+Usage
+-----
+A basic example:

```go
package main

import (
-    "log"
+    "log"

-    "github.com/fsnotify/fsnotify"
+    "github.com/fsnotify/fsnotify"
)

func main() {
-    watcher, err := fsnotify.NewWatcher()
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer watcher.Close()
-
-    done := make(chan bool)
-    go func() {
-        for {
-            select {
-            case event, ok := <-watcher.Events:
-                if !ok {
-                    return
-                }
-                log.Println("event:", event)
-                if event.Op&fsnotify.Write == fsnotify.Write {
-                    log.Println("modified file:", event.Name)
-                }
-            case err, ok := <-watcher.Errors:
-                if !ok {
-                    return
-                }
-                log.Println("error:", err)
-            }
-        }
-    }()
-
-    err = watcher.Add("/tmp/foo")
-    if err != nil {
-        log.Fatal(err)
-    }
-    <-done
+    // Create new watcher.
+    watcher, err := fsnotify.NewWatcher()
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer watcher.Close()
+
+    // Start listening for events.
+    go func() {
+        for {
+            select {
+            case event, ok := <-watcher.Events:
+                if !ok {
+                    return
+                }
+                log.Println("event:", event)
+                if event.Has(fsnotify.Write) {
+                    log.Println("modified file:", event.Name)
+                }
+            case err, ok := <-watcher.Errors:
+                if !ok {
+                    return
+                }
+                log.Println("error:", err)
+            }
+        }
+    }()
+
+    // Add a path.
+    err = watcher.Add("/tmp")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Block main goroutine forever.
+    <-make(chan struct{})
}
```

-## Contributing
+Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
+run with:

-Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+    % go run ./cmd/fsnotify

-## FAQ
+FAQ
+---
+### Will a file still be watched when it's moved to another directory?
+No, not unless you are watching the location it was moved to.

-**When a file is moved to another directory is it still being watched?**
+### Are subdirectories watched too?
+No, you must add watches for any directory you want to watch (a recursive
+watcher is on the roadmap: [#18]).

-No (it shouldn't be, unless you are watching where it was moved to).
+[#18]: https://github.com/fsnotify/fsnotify/issues/18

-**When I watch a directory, are all subdirectories watched as well?**
+### Do I have to watch the Error and Event channels in a goroutine?
+As of now, yes (you can read both channels in the same goroutine using `select`;
+you don't need a separate goroutine for each channel; see the example).

-No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
+fsnotify requires support from the underlying OS to work. The current NFS and SMB
+protocols do not provide network-level support for file notifications, and
+neither do the /proc and /sys virtual filesystems.

-**Do I have to watch the Error and Event channels in a separate goroutine?**
+This could be fixed with a polling watcher ([#9]), but it's not yet implemented.

-As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+[#9]: https://github.com/fsnotify/fsnotify/issues/9

-**Why am I receiving multiple events for the same file on OS X?**
+Platform-specific notes
+-----------------------
+### Linux
+When a file is removed a REMOVE event won't be emitted until all file
+descriptors are closed; a CHMOD event is emitted instead:

-Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+    fp := os.Open("file")
+    os.Remove("file")        // CHMOD
+    fp.Close()               // REMOVE

-**How many files can be watched at once?**
+This is the event that inotify sends, so not much can be changed about this.

-There are OS-specific limits as to how many watches can be created:
-* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
-* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
+the number of watches per user, and `fs.inotify.max_user_instances` specifies
+the maximum number of inotify instances per user. Every Watcher you create is an
+"instance", and every path you add is a "watch".

-**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
+`/proc/sys/fs/inotify/max_user_instances`.

-fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
+To increase them you can use `sysctl` or write the value to the `/proc` file:

-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#7]: https://github.com/howeyc/fsnotify/issues/7
+    # The default values on Linux 5.18
+    sysctl fs.inotify.max_user_watches=124983
+    sysctl fs.inotify.max_user_instances=128
+
+To make the changes persist on reboot edit `/etc/sysctl.conf` or
+`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
+distro's documentation):
+
+    fs.inotify.max_user_watches=124983
+    fs.inotify.max_user_instances=128

-[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+Reaching the limit will result in a "no space left on device" or "too many open
+files" error.

-## Related Projects
+### kqueue (macOS, all BSD systems)
+kqueue requires opening a file descriptor for every file that's being watched,
+so if you're watching a directory with five files then that's six file
+descriptors. You will run into your system's "max open files" limit faster on
+these platforms.

-* [notify](https://github.com/rjeczalik/notify)
-* [fsevents](https://github.com/fsnotify/fsevents)
+The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
+control the maximum number of open files.
+
+### macOS
+Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
+workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
+have a native FSEvents implementation (see [#11]).
+
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
new file mode 100644
index 000000000..1a95ad8e7
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -0,0 +1,162 @@
+//go:build solaris
+// +build solaris
+
+package fsnotify
+
+import (
+    "errors"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). 
+    //
+    // fsnotify.Chmod     Attributes were changed. On Linux this is also sent
+    //                    when a file is removed (or more accurately, when a
+    //                    link to an inode is removed). On kqueue it's sent
+    //                    when a file is truncated. On Windows it's never
+    //                    sent.
+    Events chan Event
+
+    // Errors sends any errors.
+    Errors chan error
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+    return nil, errors.New("FEN based watcher not yet supported for fsnotify")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+    return nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination, removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+    return nil
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+    return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
new file mode 100644
index 000000000..54c77fbb0
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
@@ -0,0 +1,459 @@
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "path/filepath"
+    "strings"
+    "sync"
+    "unsafe"
+
+    "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+//    fp := os.Open("file")
+//    os.Remove("file")        // Triggers Chmod
+//    fp.Close()               // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
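+//
+// Such a Chmod can be told apart from a real attribute change by stat'ing the
+// path when the event arrives; a sketch (not part of this package's API):
+//
+//    if event.Has(fsnotify.Chmod) {
+//        if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+//            // The last link was removed; a Remove will follow once all
+//            // file descriptors are closed.
+//        }
+//    }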
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances.
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+//    # Default values on Linux 5.18
+//    sysctl fs.inotify.max_user_watches=124983
+//    sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+//    fs.inotify.max_user_watches=124983
+//    fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched,
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # macOS notes
+//
+// Spotlight indexing on macOS can result in multiple events (see [#15]). A
+// temporary workaround is to add your folder(s) to the "Spotlight Privacy
+// Settings" until we have a native FSEvents implementation (see [#11]).
+//
+// [#11]: https://github.com/fsnotify/fsnotify/issues/11
+// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+type Watcher struct {
+    // Events sends the filesystem change events.
+    //
+    // fsnotify can send the following events; a "path" here can refer to a
+    // file, directory, symbolic link, or special file like a FIFO.
+    //
+    // fsnotify.Create    A new path was created; this may be followed by one
+    //                    or more Write events if data also gets written to a
+    //                    file.
+    //
+    // fsnotify.Remove    A path was removed.
+    //
+    // fsnotify.Rename    A path was renamed. A rename is always sent with the
+    //                    old path as Event.Name, and a Create event will be
+    //                    sent with the new name. Renames are only sent for
+    //                    paths that are currently watched; e.g. moving an
+    //                    unmonitored file into a monitored directory will
+    //                    show up as just a Create. Similarly, renaming a file
+    //                    to outside a monitored directory will show up as
+    //                    only a Rename.
+    //
+    // fsnotify.Write     A file or named pipe was written to. A Truncate will
+    //                    also trigger a Write. A single "write action"
+    //                    initiated by the user may show up as one or multiple
+    //                    writes, depending on when the system syncs things to
+    //                    disk. For example when compiling a large Go program
+    //                    you may get hundreds of Write events, so you
+    //                    probably want to wait until you've stopped receiving
+    //                    them (see the dedup example in cmd/fsnotify).
+    //
+    // fsnotify.Chmod     Attributes were changed. On Linux this is also sent
+    //                    when a file is removed (or more accurately, when a
+    //                    link to an inode is removed). On kqueue it's sent
+    //                    when a file is truncated. On Windows it's never
+    //                    sent.
+    Events chan Event
+
+    // Errors sends any errors.
+    Errors chan error
+
+    // Store fd here as os.File.Read() will no longer return on close after
+    // calling Fd(). See: https://github.com/golang/go/issues/26439
+    fd          int
+    mu          sync.Mutex // Map access
+    inotifyFile *os.File
+    watches     map[string]*watch // Map of inotify watches (key: path)
+    paths       map[int]string    // Map of watched paths (key: watch descriptor)
+    done        chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+    doneResp    chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher creates a new Watcher.
+func NewWatcher() (*Watcher, error) {
+    // Create the inotify fd.
+    // Need to set the FD to nonblocking mode in order for SetDeadline methods
+    // to work; otherwise, blocking i/o operations won't terminate on close.
+    fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
+    if fd == -1 {
+        return nil, errno
+    }
+
+    w := &Watcher{
+        fd:          fd,
+        inotifyFile: os.NewFile(uintptr(fd), ""),
+        watches:     make(map[string]*watch),
+        paths:       make(map[int]string),
+        Events:      make(chan Event),
+        Errors:      make(chan error),
+        done:        make(chan struct{}),
+        doneResp:    make(chan struct{}),
+    }
+
+    go w.readEvents()
+    return w, nil
+}
+
+// Returns true if the event was sent, or false if the watcher is closed.
+func (w *Watcher) sendEvent(e Event) bool {
+    select {
+    case w.Events <- e:
+        return true
+    case <-w.done:
+    }
+    return false
+}
+
+// Returns true if the error was sent, or false if the watcher is closed.
+func (w *Watcher) sendError(err error) bool {
+    select {
+    case w.Errors <- err:
+        return true
+    case <-w.done:
+        return false
+    }
+}
+
+func (w *Watcher) isClosed() bool {
+    select {
+    case <-w.done:
+        return true
+    default:
+        return false
+    }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+    w.mu.Lock()
+    if w.isClosed() {
+        w.mu.Unlock()
+        return nil
+    }
+
+    // Send 'close' signal to goroutine, and set the Watcher to closed.
+    close(w.done)
+    w.mu.Unlock()
+
+    // Causes any blocking reads to return with an error, provided the file
+    // still supports deadline operations.
+    err := w.inotifyFile.Close()
+    if err != nil {
+        return err
+    }
+
+    // Wait for goroutine to close.
+    <-w.doneResp
+
+    return nil
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file a temporary file will be written to first, and if successful the
+// temporary file is moved to the destination, removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+    name = filepath.Clean(name)
+    if w.isClosed() {
+        return errors.New("inotify instance already closed")
+    }
+
+    var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+        unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+        unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+    w.mu.Lock()
+    defer w.mu.Unlock()
+    watchEntry := w.watches[name]
+    if watchEntry != nil {
+        flags |= watchEntry.flags | unix.IN_MASK_ADD
+    }
+    wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+    if wd == -1 {
+        return errno
+    }
+
+    if watchEntry == nil {
+        w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+        w.paths[wd] = name
+    } else {
+        watchEntry.wd = uint32(wd)
+        watchEntry.flags = flags
+    }
+
+    return nil
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+    name = filepath.Clean(name)
+
+    // Fetch the watch.
+    w.mu.Lock()
+    defer w.mu.Unlock()
+    watch, ok := w.watches[name]
+
+    // Remove it from inotify.
+    if !ok {
+        return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+    }
+
+    // If InotifyRmWatch doesn't return an error we successfully removed the
+    // watch; we still need to clean up our internal state to ensure it
+    // matches inotify's kernel state.
+    delete(w.paths, int(watch.wd))
+    delete(w.watches, name)
+
+    // inotify_rm_watch will return EINVAL if the file has been deleted;
+    // the watch will already have been removed.
+    // Watches and paths are deleted implicitly and asynchronously by calling
+    // inotify_rm_watch() below: the readEvents() goroutine receives IN_IGNORED,
+    // so EINVAL means the wd is already being rm_watch()ed or its file was
+    // removed by another thread and we have not yet received the IN_IGNORED
+    // event.
+    success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+    if success == -1 {
+        // TODO: Perhaps it's not helpful to return an error here in every
+        // case; the only two possible errors are:
+        //
+        // - EBADF, which happens when w.fd is not a valid file descriptor
+        //   of any kind.
+        // - EINVAL, which is when fd is not an inotify descriptor or wd
+        //   is not a valid watch descriptor. Watch descriptors are
+        //   invalidated when they are removed explicitly or implicitly;
+        //   explicitly by inotify_rm_watch, implicitly when the file they
+        //   are watching is deleted.
+        return errno
+    }
+
+    return nil
+}
+
+// WatchList returns all paths added with [Add] (and not yet removed).
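+//
+// A small usage sketch (hypothetical path; Add and WatchList are part of
+// this package):
+//
+//    w, _ := fsnotify.NewWatcher()
+//    _ = w.Add("/tmp")
+//    fmt.Println(w.WatchList()) // prints [/tmp]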
+func (w *Watcher) WatchList() []string {
+    w.mu.Lock()
+    defer w.mu.Unlock()
+
+    entries := make([]string, 0, len(w.watches))
+    for pathname := range w.watches {
+        entries = append(entries, pathname)
+    }
+
+    return entries
+}
+
+type watch struct {
+    wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+    flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the received
+// events into Event objects, and sends them via the Events channel.
+func (w *Watcher) readEvents() {
+    defer func() {
+        close(w.doneResp)
+        close(w.Errors)
+        close(w.Events)
+    }()
+
+    var (
+        buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+        errno error                                // Syscall errno
+    )
+    for {
+        // See if we have been closed.
+        if w.isClosed() {
+            return
+        }
+
+        n, err := w.inotifyFile.Read(buf[:])
+        switch {
+        case errors.Unwrap(err) == os.ErrClosed:
+            return
+        case err != nil:
+            if !w.sendError(err) {
+                return
+            }
+            continue
+        }
+
+        if n < unix.SizeofInotifyEvent {
+            var err error
+            if n == 0 {
+                // EOF was received; this should really never happen.
+                err = io.EOF
+            } else if n < 0 {
+                // An error occurred while reading.
+                err = errno
+            } else {
+                // Read was too short.
+                err = errors.New("notify: short read in readEvents()")
+            }
+            if !w.sendError(err) {
+                return
+            }
+            continue
+        }
+
+        var offset uint32
+        // We don't know how many events we just read into the buffer.
+        // While the offset points to at least one whole event...
+        for offset <= uint32(n-unix.SizeofInotifyEvent) {
+            var (
+                // Point "raw" to the event in the buffer
+                raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+                mask    = uint32(raw.Mask)
+                nameLen = uint32(raw.Len)
+            )
+
+            if mask&unix.IN_Q_OVERFLOW != 0 {
+                if !w.sendError(ErrEventOverflow) {
+                    return
+                }
+            }
+
+            // If the event happened to the watched directory or the watched file, the kernel
+            // doesn't append the filename to the event, but we would like to always fill the
+            // "Name" field with a valid filename. We retrieve the path of the watch from
+            // the "paths" map.
+            w.mu.Lock()
+            name, ok := w.paths[int(raw.Wd)]
+            // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+            // This is a sign to clean up the maps, otherwise we are no longer in sync
+            // with the inotify kernel state which has already deleted the watch
+            // automatically.
+            if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+                delete(w.paths, int(raw.Wd))
+                delete(w.watches, name)
+            }
+            w.mu.Unlock()
+
+            if nameLen > 0 {
+                // Point "bytes" at the first byte of the filename
+                bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+                // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+                name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+            }
+
+            event := w.newEvent(name, mask)
+
+            // Send the events that are not ignored on the events channel
+            if mask&unix.IN_IGNORED == 0 {
+                if !w.sendEvent(event) {
+                    return
+                }
+            }
+
+            // Move to the next event in the buffer
+            offset += unix.SizeofInotifyEvent + nameLen
+        }
+    }
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 000000000..29087469b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. 
+ err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
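+//
+// Since the error wraps [ErrNonExistentWatch] with %w it can be checked with
+// errors.Is; a sketch:
+//
+//    if err := w.Remove("/tmp/dir"); errors.Is(err, fsnotify.ErrNonExistentWatch) {
+//        // "/tmp/dir" was never added, or was already removed.
+//    }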
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. 
+ for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we
+                        // do a recursive watch and perform rm -fr, the parent directory might
+                        // have gone missing; ignore the missing directory and let the
+                        // upcoming delete event remove the watch from the parent directory.
+                        if _, err := os.Lstat(fileDir); err == nil {
+                            w.sendDirectoryChangeEvents(fileDir)
+                        }
+                    }
+                } else {
+                    filePath := filepath.Clean(event.Name)
+                    if fileInfo, err := os.Lstat(filePath); err == nil {
+                        w.sendFileCreatedEventIfNew(filePath, fileInfo)
+                    }
+                }
+            }
+        }
+    }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func (w *Watcher) newEvent(name string, mask uint32) Event {
+    e := Event{Name: name}
+    if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+        e.Op |= Remove
+    }
+    if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+        e.Op |= Write
+    }
+    if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+        e.Op |= Rename
+    }
+    if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+        e.Op |= Chmod
+    }
+    return e
+}
+
+// watchDirectoryFiles watches all files in a directory, to mimic inotify when
+// adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+    // Get all files
+    files, err := ioutil.ReadDir(dirPath)
+    if err != nil {
+        return err
+    }
+
+    for _, fileInfo := range files {
+        path := filepath.Join(dirPath, fileInfo.Name())
+
+        cleanPath, err := w.internalWatch(path, fileInfo)
+        if err != nil {
+            // No permission to read the file; that's not a problem: just skip.
+            // But do add it to w.fileExists to prevent it from being picked up
+            // as a "new" file later (it still shows up in the directory
+            // listing).
+            switch {
+            case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
+                cleanPath = filepath.Clean(path)
+            default:
+                return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
+            }
+        }
+
+        w.mu.Lock()
+        w.fileExists[cleanPath] = struct{}{}
+        w.mu.Unlock()
+    }
+
+    return nil
+}
+
+// Search the directory for new files and send an event for them.
+//
+// This functionality is to have the BSD watcher match inotify, which sends
+// a create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dir string) {
+    // Get all files
+    files, err := ioutil.ReadDir(dir)
+    if err != nil {
+        if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
+            return
+        }
+    }
+
+    // Search for new files
+    for _, fi := range files {
+        err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+        if err != nil {
+            return
+        }
+    }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 000000000..a9bb1c3c4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 000000000..ae392867c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
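+//
+// Close is safe to call more than once; as in the README example, it is
+// typically deferred right after creating the watcher:
+//
+//    w, err := fsnotify.NewWatcher()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    defer w.Close()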
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+	w.mu.Unlock()
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts monitoring the path for changes.
+//
+// A path can only be watched once; attempting to watch it more than once will
+// return an error. Paths that do not yet exist on the filesystem cannot be
+// added. A watch will be automatically removed if the path is deleted.
+//
+// A path will remain watched if it gets renamed to somewhere else on the same
+// filesystem, but the monitor will get removed if the path gets deleted and
+// re-created, or if it's moved to a different filesystem.
+//
+// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
+// filesystems (/proc, /sys, etc.) generally don't work.
+//
+// # Watching directories
+//
+// All files in a directory are monitored, including new files that are created
+// after the watcher is started. Subdirectories are not watched (i.e. it's
+// non-recursive).
+//
+// # Watching files
+//
+// Watching individual files (rather than directories) is generally not
+// recommended as many tools update files atomically. Instead of "just" writing
+// to the file, a temporary file will be written first, and if successful the
+// temporary file is moved to the destination, removing the original, or some
+// variant thereof. The watcher on the original file is now lost, as it no
+// longer exists.
+//
+// Instead, watch the parent directory and use Event.Name to filter out files
+// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return errors.New("watcher already closed")
+	}
+	w.mu.Unlock()
+
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops monitoring the path for changes.
+//
+// Directories are always removed non-recursively. For example, if you added
+// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
+//
+// Removing a path that has not yet been added returns [ErrNonExistentWatch].
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// WatchList returns all paths added with [Add] (and not yet removed).
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for _, entry := range w.watches {
+		for _, watchEntry := range entry {
+			entries = append(entries, watchEntry.path)
+		}
+	}
+
+	return entries
+}
+
+// These options are from the old golang.org/x/exp/winfsnotify, where you could
+// add various options to the watch. This has long since been removed.
+//
+// The "sys" in the name is misleading as they're not part of any "system".
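+//
+// For illustration (editor's note, not upstream text): the values still
+// line up with inotify's constants from that lineage, e.g.:
+//
+//	sysFSCREATE == 0x100 // == unix.IN_CREATE on Linux
+//	sysFSMODIFY == 0x2   // == unix.IN_MODIFY on Linux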
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
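+//
+// Watches are keyed by volume serial number plus file index, the closest
+// Windows analogue to a Unix inode, so two paths that resolve to the same
+// directory share one entry. Roughly (editor's sketch of the lookup):
+//
+//	idx := uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow)
+//	entry := w.watches[fi.VolumeSerialNumber][idx]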
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
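+//
+// On the consumer side these arrive on Watcher.Events and Watcher.Errors;
+// a minimal receive loop looks like this (an illustrative sketch, not
+// upstream code; a real loop should also stop when the channels close):
+//
+//	for {
+//		select {
+//		case event := <-w.Events:
+//			if event.Has(fsnotify.Write) {
+//				log.Println("modified:", event.Name)
+//			}
+//		case err := <-w.Errors:
+//			log.Println("error:", err)
+//		}
+//	}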
+func (w *Watcher) readEvents() {
+	var (
+		n   uint32
+		key uintptr
+		ov  *windows.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
+		// This error is handled after the watch == nil check below. NOTE: this
+		// seems odd, not sure if it's correct.
+
+		watch := (*watch)(unsafe.Pointer(ov))
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+
+				err := windows.CloseHandle(w.port)
+				if err != nil {
+					err = os.NewSyscallError("CloseHandle", err)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch qErr {
+		case windows.ERROR_MORE_DATA:
+			if watch == nil {
+				w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case windows.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case windows.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.sendError(os.NewSyscallError("GetQueuedCompletionStatus", qErr))
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.sendError(errors.New("short read in readEvents()"))
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+
+			// Create a buf that is the size of the path name
+			size := int(raw.FileNameLength / 2)
+			var buf []uint16
+			// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
+			sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+			sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+			sh.Len = size
+			sh.Cap = size
+			name := windows.UTF16ToString(buf)
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case windows.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case windows.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case windows.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case windows.FILE_ACTION_RENAMED_NEW_NAME:
+				// Update saved path of all sub-watches.
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f5..000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error {
-	return nil
-}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 0f4ee52e8..30a5bf0f0 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -1,29 +1,37 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
 //go:build !plan9
 // +build !plan9
 
-// Package fsnotify provides a platform-independent interface for file system notifications.
+// Package fsnotify provides a cross-platform interface for file system
+// notifications.
 package fsnotify
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
+	"strings"
 )
 
-// Event represents a single file system notification.
+// Event represents a file system notification.
 type Event struct {
-	Name string // Relative path to the file or directory.
-	Op   Op     // File operation that triggered the event.
+	// Path to the file or directory.
+	//
+	// Paths are relative to the input; for example with Add("dir") the Name
+	// will be set to "dir/file" if you create that file, but if you use
+	// Add("/path/to/dir") it will be "/path/to/dir/file".
+	Name string
+
+	// File operation that triggered the event.
+	//
+	// This is a bitmask and some systems may send multiple operations at once.
+	// Use the Event.Has() method instead of comparing with ==.
+	Op Op
 }
 
 // Op describes a set of file operations.
 type Op uint32
 
-// These are the generalized file operations that can trigger a notification.
+// The operations fsnotify can trigger; see the documentation on [Watcher] for a
+// full description, and check them with [Event.Has].
 const (
 	Create Op = 1 << iota
 	Write
@@ -32,38 +40,42 @@ const (
 	Chmod
 )
 
-func (op Op) String() string {
-	// Use a buffer for efficient string concatenation
-	var buffer bytes.Buffer
+// Common errors that can be reported by a watcher.
+var (
+	ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
+	ErrEventOverflow    = errors.New("fsnotify queue overflow")
+)
 
-	if op&Create == Create {
-		buffer.WriteString("|CREATE")
+func (op Op) String() string {
+	var b strings.Builder
+	if op.Has(Create) {
+		b.WriteString("|CREATE")
 	}
-	if op&Remove == Remove {
-		buffer.WriteString("|REMOVE")
+	if op.Has(Remove) {
+		b.WriteString("|REMOVE")
 	}
-	if op&Write == Write {
-		buffer.WriteString("|WRITE")
+	if op.Has(Write) {
+		b.WriteString("|WRITE")
 	}
-	if op&Rename == Rename {
-		buffer.WriteString("|RENAME")
+	if op.Has(Rename) {
+		b.WriteString("|RENAME")
 	}
-	if op&Chmod == Chmod {
-		buffer.WriteString("|CHMOD")
+	if op.Has(Chmod) {
+		b.WriteString("|CHMOD")
 	}
-	if buffer.Len() == 0 {
-		return ""
+	if b.Len() == 0 {
+		return "[no events]"
 	}
-	return buffer.String()[1:] // Strip leading pipe
+	return b.String()[1:]
 }
 
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
+// Has reports whether this operation includes the given operation.
+func (o Op) Has(h Op) bool { return o&h == h }
+
+// Has reports whether this event carries the given operation.
+func (e Event) Has(op Op) bool { return e.Op.Has(op) }
+
+// String returns a string representation of the event with its path.
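+//
+// For example (illustrative output):
+//
+//	CREATE        "/tmp/foo"
+//	WRITE|CHMOD   "/tmp/bar"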
func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 596885598..000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec8..000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. 
-func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c3..000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d8532..000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 000000000..b09ef7683 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 57% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 36cc3845b..4322b0b88 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 52% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 98cd8476f..5da5ffa78 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build darwin // +build darwin diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb0..000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = 
filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md index 1a9a27bcf..8f349c4b8 100644 --- a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md +++ b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md @@ -1,48 +1,82 @@ # Changelog + All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
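> **Editor's note on the fsnotify hunks above:** the deleted `windows.go` implemented, on top of I/O completion ports, the same public surface (`Watcher`, `NewWatcher`, `Add`, `Remove`, `Close`, `Events`, `Errors`) that the zsh doc-comment script at the top of this section keeps in sync across platform files. For orientation only, here is a minimal consumer of that API, following the upstream fsnotify README; the watched path and logging are illustrative, not part of this diff.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// On Windows, NewWatcher used to spin up the completion-port
	// reader goroutine shown in the deleted windows.go above.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Drain both channels; Events and Errors are closed by Close().
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	// Add watches a file or directory non-recursively.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	select {} // block forever; demo only
}
```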
+## [0.6.0] - 2023-01-30 + +[0.6.0]: https://github.com/go-logfmt/logfmt/compare/v0.5.1...v0.6.0 + +### Added + +- NewDecoderSize by [@alexanderjophus] + +## [0.5.1] - 2021-08-18 + +[0.5.1]: https://github.com/go-logfmt/logfmt/compare/v0.5.0...v0.5.1 + +### Changed + +- Update the `go.mod` file for Go 1.17 as described in the [Go 1.17 release + notes](https://golang.org/doc/go1.17#go-command) + ## [0.5.0] - 2020-01-03 +[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0 + ### Changed + - Remove the dependency on github.com/kr/logfmt by [@ChrisHines] - Move fuzz code to github.com/go-logfmt/fuzzlogfmt by [@ChrisHines] ## [0.4.0] - 2018-11-21 +[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0 + ### Added + - Go module support by [@ChrisHines] - CHANGELOG by [@ChrisHines] ### Changed + - Drop invalid runes from keys instead of returning ErrInvalidKey by [@ChrisHines] - On panic while printing, attempt to print panic value by [@bboreham] ## [0.3.0] - 2016-11-15 + +[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0 + ### Added + - Pool buffers for quoted strings and byte slices by [@nussjustin] + ### Fixed + - Fuzz fix, quote invalid UTF-8 values by [@judwhite] ## [0.2.0] - 2016-05-08 + +[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0 + ### Added + - Encoder.EncodeKeyvals by [@ChrisHines] ## [0.1.0] - 2016-03-28 + +[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0 + ### Added + - Encoder by [@ChrisHines] - Decoder by [@ChrisHines] - MarshalKeyvals by [@ChrisHines] -[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0 -[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0 -[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0 -[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0 -[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0 - [@ChrisHines]: https://github.com/ChrisHines [@bboreham]: https://github.com/bboreham [@judwhite]: https://github.com/judwhite [@nussjustin]: https://github.com/nussjustin +[@alexanderjophus]: https://github.com/alexanderjophus diff --git a/vendor/github.com/go-logfmt/logfmt/README.md b/vendor/github.com/go-logfmt/logfmt/README.md index 8e48fcd3a..71c57944e 100644 --- a/vendor/github.com/go-logfmt/logfmt/README.md +++ b/vendor/github.com/go-logfmt/logfmt/README.md @@ -1,20 +1,25 @@ +# logfmt + [![Go Reference](https://pkg.go.dev/badge/github.com/go-logfmt/logfmt.svg)](https://pkg.go.dev/github.com/go-logfmt/logfmt) [![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt) [![Github Actions](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml/badge.svg)](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=master) - -# logfmt +[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=main) Package logfmt implements utilities to marshal and unmarshal data in the [logfmt -format](https://brandur.org/logfmt). It provides an API similar to -[encoding/json](http://golang.org/pkg/encoding/json/) and -[encoding/xml](http://golang.org/pkg/encoding/xml/). +format][fmt]. It provides an API similar to [encoding/json][json] and +[encoding/xml][xml]. 
+ +[fmt]: https://brandur.org/logfmt +[json]: https://pkg.go.dev/encoding/json +[xml]: https://pkg.go.dev/encoding/xml The logfmt format was first documented by Brandur Leach in [this -article](https://brandur.org/logfmt). The format has not been formally -standardized. The most authoritative public specification to date has been the -documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt) -written by Blake Mizerany and Keith Rarick. +article][origin]. The format has not been formally standardized. The most +authoritative public specification to date has been the documentation of a Go +Language [package][parser] written by Blake Mizerany and Keith Rarick. + +[origin]: https://brandur.org/logfmt +[parser]: https://pkg.go.dev/github.com/kr/logfmt ## Goals @@ -30,4 +35,7 @@ standard as a goal. ## Versioning -Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'. +This project publishes releases according to the Go language guidelines for +[developing and publishing modules][pub]. + +[pub]: https://go.dev/doc/modules/developing diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go index 2013708e4..a1c22dcbd 100644 --- a/vendor/github.com/go-logfmt/logfmt/decode.go +++ b/vendor/github.com/go-logfmt/logfmt/decode.go @@ -29,6 +29,23 @@ func NewDecoder(r io.Reader) *Decoder { return dec } +// NewDecoderSize returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r beyond +// the logfmt records requested. +// The size argument specifies the size of the initial buffer that the +// Decoder will use to read records from r. +// If a log line is longer than the size argument, the Decoder will return +// a bufio.ErrTooLong error. +func NewDecoderSize(r io.Reader, size int) *Decoder { + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, size), size) + dec := &Decoder{ + s: scanner, + } + return dec +} + // ScanRecord advances the Decoder to the next record, which can then be // parsed with the ScanKeyval method. It returns false when decoding stops, // either by reaching the end of the input or an error. 
After ScanRecord diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore deleted file mode 100644 index b975a7b4c..000000000 --- a/vendor/github.com/go-redis/redis/v8/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.rdb -testdata/*/ -.idea/ diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml deleted file mode 100644 index de514554a..000000000 --- a/vendor/github.com/go-redis/redis/v8/.golangci.yml +++ /dev/null @@ -1,4 +0,0 @@ -run: - concurrency: 8 - deadline: 5m - tests: false diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml deleted file mode 100644 index 8b7f044ad..000000000 --- a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml +++ /dev/null @@ -1,4 +0,0 @@ -semi: false -singleQuote: true -proseWrap: always -printWidth: 100 diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md deleted file mode 100644 index 195e51933..000000000 --- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md +++ /dev/null @@ -1,177 +0,0 @@ -## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17) - - -### Bug Fixes - -* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a)) -* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c)) -* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475)) -* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2)) -* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32)) -* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4)) -* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc)) -* set timeout for WAIT command. 
Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f)) -* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2)) - - -### Features - -* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8)) -* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e)) -* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7)) -* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e)) -* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b)) -* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417)) -* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d)) - - - -## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) - - -### Features - -* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634)) -* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24)) -* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4)) - - - -## v8.11 - -- Remove OpenTelemetry metrics. -- Supports more redis commands and options. - -## v8.10 - -- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a - single span with a Redis command (instead of 4 spans). There are multiple reasons behind this - decision: - - - Traces become smaller and less noisy. - - It may be costly to process those 3 extra spans for each query. - - go-redis no longer depends on OpenTelemetry. - - Eventually we hope to replace the information that we no longer collect with OpenTelemetry - Metrics. - -## v8.9 - -- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`, - `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings. - -## v8.8 - -- To make updating easier, extra modules now have the same version as go-redis does. That means that - you need to update your imports: - -``` -github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8 -github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8 -``` - -## v8.5 - -- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a - struct: - -```go -err := rdb.HGetAll(ctx, "hash").Scan(&data) - -err := rdb.MGet(ctx, "key1", "key2").Scan(&data) -``` - -- Please check [redismock](https://github.com/go-redis/redismock) by - [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client. 
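> **Editor's note:** the v8.5 entry above shows `rdb.HGetAll(ctx, "hash").Scan(&data)` without the scan target. As a reading aid, a minimal sketch of what such a target looks like, using the `redis:"..."` struct-tag convention from the go-redis documentation; the key, struct, and field names are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// Profile is an illustrative scan target: each tag names the
// hash field that should populate the struct field.
type Profile struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Populate a hash, then scan it back into the struct.
	if err := rdb.HSet(ctx, "user:1", "name", "ada", "age", 36).Err(); err != nil {
		panic(err)
	}
	var p Profile
	if err := rdb.HGetAll(ctx, "user:1").Scan(&p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:ada Age:36}
}
```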
- -## v8 - -- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not - using `context.Context` yet, the simplest option is to define global package variable - `var ctx = context.TODO()` and use it when `ctx` is required. - -- Full support for `context.Context` canceling. - -- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node. - -- Added `redisext.OpenTemetryHook` that adds - [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). - -- Redis slow log support. - -- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move - existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme: - -```go -import "github.com/golang/groupcache/consistenthash" - -ring := redis.NewRing(&redis.RingOptions{ - NewConsistentHash: func() { - return consistenthash.New(100, crc32.ChecksumIEEE) - }, -}) -``` - -- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3. -- `Options.MaxRetries` default value is changed from 0 to 3. - -- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. - -## v7.3 - -- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection - URL contains username. - -## v7.2 - -- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. - -## v7.1 - -- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` - interface. - -## v7 - -- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a - transactional pipeline. -- WrapProcess is replaced with more convenient AddHook that has access to context.Context. -- WithContext now can not be used to create a shallow copy of the client. -- New methods ProcessContext, DoContext, and ExecContext. -- Client respects Context.Deadline when setting net.Conn deadline. -- Client listens on Context.Done while waiting for a connection from the pool and returns an error - when context context is cancelled. -- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow - detecting reconnections. -- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse - the time. -- `SetLimiter` is removed and added `Options.Limiter` instead. -- `HMSet` is deprecated as of Redis v4. - -## v6.15 - -- Cluster and Ring pipelines process commands for each node in its own goroutine. - -## 6.14 - -- Added Options.MinIdleConns. -- Added Options.MaxConnAge. -- PoolStats.FreeConns is renamed to PoolStats.IdleConns. -- Add Client.Do to simplify creating custom commands. -- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. -- Lower memory usage. - -## v6.13 - -- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set - `HashReplicas = 1000` for better keys distribution between shards. -- Cluster client was optimized to use much less memory when reloading cluster state. -- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout - occurres. In most cases it is recommended to use PubSub.Channel instead. -- Dialer.KeepAlive is set to 5 minutes by default. - -## v6.12 - -- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis - Servers that don't have cluster mode enabled. 
See - https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile deleted file mode 100644 index a4cfe0576..000000000 --- a/vendor/github.com/go-redis/redis/v8/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort) - -test: testdeps - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - go vet - -testdeps: testdata/redis/src/redis-server - -bench: testdeps - go test ./... -test.run=NONE -test.bench=. -test.benchmem - -.PHONY: all test testdeps bench - -testdata/redis: - mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - cd $< && make all - -fmt: - gofmt -w -s ./ - goimports -w -local github.com/go-redis/redis ./ - -go_mod_tidy: - go get -u && go mod tidy - set -e; for dir in $(PACKAGE_DIRS); do \ - echo "go mod tidy in $${dir}"; \ - (cd "$${dir}" && \ - go get -u && \ - go mod tidy); \ - done diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md deleted file mode 100644 index f3b6a018c..000000000 --- a/vendor/github.com/go-redis/redis/v8/README.md +++ /dev/null @@ -1,175 +0,0 @@ -# Redis client for Go - -![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) -[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) - -go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). -Uptrace is an open source and blazingly fast **distributed tracing** backend powered by -OpenTelemetry and ClickHouse. Give it a star as well! - -## Resources - -- [Discussions](https://github.com/go-redis/redis/discussions) -- [Documentation](https://redis.uptrace.dev) -- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) -- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) -- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) - -Other projects you may like: - -- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. -- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. - -## Ecosystem - -- [Redis Mock](https://github.com/go-redis/redismock) -- [Distributed Locks](https://github.com/bsm/redislock) -- [Redis Cache](https://github.com/go-redis/cache) -- [Rate limiting](https://github.com/go-redis/redis_rate) - -## Features - -- Redis 3 commands except QUIT, MONITOR, and SYNC. -- Automatic connection pooling with - [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. -- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). -- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). -- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and - [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline). -- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). 
-- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). -- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). -- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). -- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup) - without using cluster mode and Redis Sentinel. -- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). -- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation). - -## Installation - -go-redis supports 2 last Go versions and requires a Go version with -[modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go -module: - -```shell -go mod init github.com/my/repo -``` - -And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake): - -```shell -go get github.com/go-redis/redis/v8 -``` - -## Quickstart - -```go -import ( - "context" - "github.com/go-redis/redis/v8" - "fmt" -) - -var ctx = context.Background() - -func ExampleClient() { - rdb := redis.NewClient(&redis.Options{ - Addr: "localhost:6379", - Password: "", // no password set - DB: 0, // use default DB - }) - - err := rdb.Set(ctx, "key", "value", 0).Err() - if err != nil { - panic(err) - } - - val, err := rdb.Get(ctx, "key").Result() - if err != nil { - panic(err) - } - fmt.Println("key", val) - - val2, err := rdb.Get(ctx, "key2").Result() - if err == redis.Nil { - fmt.Println("key2 does not exist") - } else if err != nil { - panic(err) - } else { - fmt.Println("key2", val2) - } - // Output: key value - // key2 does not exist -} -``` - -## Look and feel - -Some corner cases: - -```go -// SET key value EX 10 NX -set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() - -// SET key value keepttl NX -set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result() - -// SORT list LIMIT 0 2 ASC -vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() - -// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 -vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ - Min: "-inf", - Max: "+inf", - Offset: 0, - Count: 2, -}).Result() - -// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM -vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{ - Keys: []string{"zset1", "zset2"}, - Weights: []int64{2, 3} -}).Result() - -// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" -vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() - -// custom command -res, err := rdb.Do(ctx, "set", "key", "value").Result() -``` - -## Run the test - -go-redis will start a redis-server and run the test cases. - -The paths of redis-server bin file and redis config file are defined in `main_test.go`: - -``` -var ( - redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) - redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) -) -``` - -For local testing, you can change the variables to refer to your local files, or create a soft link -to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: - -``` -ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src -cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ -``` - -Lastly, run: - -``` -go test -``` - -## Contributors - -Thanks to all the people who already contributed! 
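> **Editor's note:** the deleted README's feature list links Pipeline and TxPipeline, but its quickstart never exercises them. A minimal sketch in the same style, assuming a reachable local server exactly as the quickstart does; the key name is illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

var ctx = context.Background()

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Queue several commands and flush them in a single round trip.
	pipe := rdb.Pipeline()
	incr := pipe.Incr(ctx, "pipeline_counter")
	pipe.Expire(ctx, "pipeline_counter", time.Hour)
	if _, err := pipe.Exec(ctx); err != nil {
		panic(err)
	}
	// Command results are populated only after Exec returns.
	fmt.Println(incr.Val())
}
```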
- - - - diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md deleted file mode 100644 index 1115db4e3..000000000 --- a/vendor/github.com/go-redis/redis/v8/RELEASING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Releasing - -1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub: - -```shell -TAG=v1.0.0 ./scripts/release.sh -``` - -2. Open a pull request and wait for the build to finish. - -3. Merge the pull request and run `tag.sh` to create tags for packages: - -```shell -TAG=v1.0.0 ./scripts/tag.sh -``` diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go deleted file mode 100644 index a54f2f37e..000000000 --- a/vendor/github.com/go-redis/redis/v8/cluster.go +++ /dev/null @@ -1,1750 +0,0 @@ -package redis - -import ( - "context" - "crypto/tls" - "fmt" - "math" - "net" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hashtag" - "github.com/go-redis/redis/v8/internal/pool" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/rand" -) - -var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") - -// ClusterOptions are used to configure a cluster client and should be -// passed to NewClusterClient. -type ClusterOptions struct { - // A seed list of host:port addresses of cluster nodes. - Addrs []string - - // NewClient creates a cluster node client with provided name and options. - NewClient func(opt *Options) *Client - - // The maximum number of retries before giving up. Command is retried - // on network errors and MOVED/ASK redirects. - // Default is 3 retries. - MaxRedirects int - - // Enables read-only commands on slave nodes. - ReadOnly bool - // Allows routing read-only commands to the closest master or slave node. - // It automatically enables ReadOnly. - RouteByLatency bool - // Allows routing read-only commands to the random master or slave node. - // It automatically enables ReadOnly. - RouteRandomly bool - - // Optional function that returns cluster slots information. - // It is useful to manually create cluster of standalone Redis servers - // and load-balance read/write operations between master and slaves. - // It can use service like ZooKeeper to maintain configuration information - // and Cluster.ReloadState to manually trigger state reloading. - ClusterSlots func(context.Context) ([]ClusterSlot, error) - - // Following options are copied from Options struct. - - Dialer func(ctx context.Context, network, addr string) (net.Conn, error) - - OnConnect func(ctx context.Context, cn *Conn) error - - Username string - Password string - - MaxRetries int - MinRetryBackoff time.Duration - MaxRetryBackoff time.Duration - - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - - // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). - PoolFIFO bool - - // PoolSize applies per cluster node and not for the whole cluster. 
- PoolSize int - MinIdleConns int - MaxConnAge time.Duration - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration - - TLSConfig *tls.Config -} - -func (opt *ClusterOptions) init() { - if opt.MaxRedirects == -1 { - opt.MaxRedirects = 0 - } else if opt.MaxRedirects == 0 { - opt.MaxRedirects = 3 - } - - if opt.RouteByLatency || opt.RouteRandomly { - opt.ReadOnly = true - } - - if opt.PoolSize == 0 { - opt.PoolSize = 5 * runtime.GOMAXPROCS(0) - } - - switch opt.ReadTimeout { - case -1: - opt.ReadTimeout = 0 - case 0: - opt.ReadTimeout = 3 * time.Second - } - switch opt.WriteTimeout { - case -1: - opt.WriteTimeout = 0 - case 0: - opt.WriteTimeout = opt.ReadTimeout - } - - if opt.MaxRetries == 0 { - opt.MaxRetries = -1 - } - switch opt.MinRetryBackoff { - case -1: - opt.MinRetryBackoff = 0 - case 0: - opt.MinRetryBackoff = 8 * time.Millisecond - } - switch opt.MaxRetryBackoff { - case -1: - opt.MaxRetryBackoff = 0 - case 0: - opt.MaxRetryBackoff = 512 * time.Millisecond - } - - if opt.NewClient == nil { - opt.NewClient = NewClient - } -} - -func (opt *ClusterOptions) clientOptions() *Options { - const disableIdleCheck = -1 - - return &Options{ - Dialer: opt.Dialer, - OnConnect: opt.OnConnect, - - Username: opt.Username, - Password: opt.Password, - - MaxRetries: opt.MaxRetries, - MinRetryBackoff: opt.MinRetryBackoff, - MaxRetryBackoff: opt.MaxRetryBackoff, - - DialTimeout: opt.DialTimeout, - ReadTimeout: opt.ReadTimeout, - WriteTimeout: opt.WriteTimeout, - - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - MinIdleConns: opt.MinIdleConns, - MaxConnAge: opt.MaxConnAge, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: disableIdleCheck, - - TLSConfig: opt.TLSConfig, - // If ClusterSlots is populated, then we probably have an artificial - // cluster whose nodes are not in clustering mode (otherwise there isn't - // much use for ClusterSlots config). This means we cannot execute the - // READONLY command against that node -- setting readOnly to false in such - // situations in the options below will prevent that from happening. 
- readOnly: opt.ReadOnly && opt.ClusterSlots == nil, - } -} - -//------------------------------------------------------------------------------ - -type clusterNode struct { - Client *Client - - latency uint32 // atomic - generation uint32 // atomic - failing uint32 // atomic -} - -func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { - opt := clOpt.clientOptions() - opt.Addr = addr - node := clusterNode{ - Client: clOpt.NewClient(opt), - } - - node.latency = math.MaxUint32 - if clOpt.RouteByLatency { - go node.updateLatency() - } - - return &node -} - -func (n *clusterNode) String() string { - return n.Client.String() -} - -func (n *clusterNode) Close() error { - return n.Client.Close() -} - -func (n *clusterNode) updateLatency() { - const numProbe = 10 - var dur uint64 - - for i := 0; i < numProbe; i++ { - time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) - - start := time.Now() - n.Client.Ping(context.TODO()) - dur += uint64(time.Since(start) / time.Microsecond) - } - - latency := float64(dur) / float64(numProbe) - atomic.StoreUint32(&n.latency, uint32(latency+0.5)) -} - -func (n *clusterNode) Latency() time.Duration { - latency := atomic.LoadUint32(&n.latency) - return time.Duration(latency) * time.Microsecond -} - -func (n *clusterNode) MarkAsFailing() { - atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) -} - -func (n *clusterNode) Failing() bool { - const timeout = 15 // 15 seconds - - failing := atomic.LoadUint32(&n.failing) - if failing == 0 { - return false - } - if time.Now().Unix()-int64(failing) < timeout { - return true - } - atomic.StoreUint32(&n.failing, 0) - return false -} - -func (n *clusterNode) Generation() uint32 { - return atomic.LoadUint32(&n.generation) -} - -func (n *clusterNode) SetGeneration(gen uint32) { - for { - v := atomic.LoadUint32(&n.generation) - if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { - break - } - } -} - -//------------------------------------------------------------------------------ - -type clusterNodes struct { - opt *ClusterOptions - - mu sync.RWMutex - addrs []string - nodes map[string]*clusterNode - activeAddrs []string - closed bool - - _generation uint32 // atomic -} - -func newClusterNodes(opt *ClusterOptions) *clusterNodes { - return &clusterNodes{ - opt: opt, - - addrs: opt.Addrs, - nodes: make(map[string]*clusterNode), - } -} - -func (c *clusterNodes) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil - } - c.closed = true - - var firstErr error - for _, node := range c.nodes { - if err := node.Client.Close(); err != nil && firstErr == nil { - firstErr = err - } - } - - c.nodes = nil - c.activeAddrs = nil - - return firstErr -} - -func (c *clusterNodes) Addrs() ([]string, error) { - var addrs []string - - c.mu.RLock() - closed := c.closed //nolint:ifshort - if !closed { - if len(c.activeAddrs) > 0 { - addrs = c.activeAddrs - } else { - addrs = c.addrs - } - } - c.mu.RUnlock() - - if closed { - return nil, pool.ErrClosed - } - if len(addrs) == 0 { - return nil, errClusterNoNodes - } - return addrs, nil -} - -func (c *clusterNodes) NextGeneration() uint32 { - return atomic.AddUint32(&c._generation, 1) -} - -// GC removes unused nodes. 
-func (c *clusterNodes) GC(generation uint32) { - //nolint:prealloc - var collected []*clusterNode - - c.mu.Lock() - - c.activeAddrs = c.activeAddrs[:0] - for addr, node := range c.nodes { - if node.Generation() >= generation { - c.activeAddrs = append(c.activeAddrs, addr) - if c.opt.RouteByLatency { - go node.updateLatency() - } - continue - } - - delete(c.nodes, addr) - collected = append(collected, node) - } - - c.mu.Unlock() - - for _, node := range collected { - _ = node.Client.Close() - } -} - -func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { - node, err := c.get(addr) - if err != nil { - return nil, err - } - if node != nil { - return node, nil - } - - c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return nil, pool.ErrClosed - } - - node, ok := c.nodes[addr] - if ok { - return node, nil - } - - node = newClusterNode(c.opt, addr) - - c.addrs = appendIfNotExists(c.addrs, addr) - c.nodes[addr] = node - - return node, nil -} - -func (c *clusterNodes) get(addr string) (*clusterNode, error) { - var node *clusterNode - var err error - c.mu.RLock() - if c.closed { - err = pool.ErrClosed - } else { - node = c.nodes[addr] - } - c.mu.RUnlock() - return node, err -} - -func (c *clusterNodes) All() ([]*clusterNode, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - if c.closed { - return nil, pool.ErrClosed - } - - cp := make([]*clusterNode, 0, len(c.nodes)) - for _, node := range c.nodes { - cp = append(cp, node) - } - return cp, nil -} - -func (c *clusterNodes) Random() (*clusterNode, error) { - addrs, err := c.Addrs() - if err != nil { - return nil, err - } - - n := rand.Intn(len(addrs)) - return c.GetOrCreate(addrs[n]) -} - -//------------------------------------------------------------------------------ - -type clusterSlot struct { - start, end int - nodes []*clusterNode -} - -type clusterSlotSlice []*clusterSlot - -func (p clusterSlotSlice) Len() int { - return len(p) -} - -func (p clusterSlotSlice) Less(i, j int) bool { - return p[i].start < p[j].start -} - -func (p clusterSlotSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -type clusterState struct { - nodes *clusterNodes - Masters []*clusterNode - Slaves []*clusterNode - - slots []*clusterSlot - - generation uint32 - createdAt time.Time -} - -func newClusterState( - nodes *clusterNodes, slots []ClusterSlot, origin string, -) (*clusterState, error) { - c := clusterState{ - nodes: nodes, - - slots: make([]*clusterSlot, 0, len(slots)), - - generation: nodes.NextGeneration(), - createdAt: time.Now(), - } - - originHost, _, _ := net.SplitHostPort(origin) - isLoopbackOrigin := isLoopback(originHost) - - for _, slot := range slots { - var nodes []*clusterNode - for i, slotNode := range slot.Nodes { - addr := slotNode.Addr - if !isLoopbackOrigin { - addr = replaceLoopbackHost(addr, originHost) - } - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return nil, err - } - - node.SetGeneration(c.generation) - nodes = append(nodes, node) - - if i == 0 { - c.Masters = appendUniqueNode(c.Masters, node) - } else { - c.Slaves = appendUniqueNode(c.Slaves, node) - } - } - - c.slots = append(c.slots, &clusterSlot{ - start: slot.Start, - end: slot.End, - nodes: nodes, - }) - } - - sort.Sort(clusterSlotSlice(c.slots)) - - time.AfterFunc(time.Minute, func() { - nodes.GC(c.generation) - }) - - return &c, nil -} - -func replaceLoopbackHost(nodeAddr, originHost string) string { - nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) - if err != nil { - return nodeAddr - } - - nodeIP := net.ParseIP(nodeHost) - if 
nodeIP == nil { - return nodeAddr - } - - if !nodeIP.IsLoopback() { - return nodeAddr - } - - // Use origin host which is not loopback and node port. - return net.JoinHostPort(originHost, nodePort) -} - -func isLoopback(host string) bool { - ip := net.ParseIP(host) - if ip == nil { - return true - } - return ip.IsLoopback() -} - -func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) > 0 { - return nodes[0], nil - } - return c.nodes.Random() -} - -func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - switch len(nodes) { - case 0: - return c.nodes.Random() - case 1: - return nodes[0], nil - case 2: - if slave := nodes[1]; !slave.Failing() { - return slave, nil - } - return nodes[0], nil - default: - var slave *clusterNode - for i := 0; i < 10; i++ { - n := rand.Intn(len(nodes)-1) + 1 - slave = nodes[n] - if !slave.Failing() { - return slave, nil - } - } - - // All slaves are loading - use master. - return nodes[0], nil - } -} - -func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) == 0 { - return c.nodes.Random() - } - - var node *clusterNode - for _, n := range nodes { - if n.Failing() { - continue - } - if node == nil || n.Latency() < node.Latency() { - node = n - } - } - if node != nil { - return node, nil - } - - // If all nodes are failing - return random node - return c.nodes.Random() -} - -func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { - nodes := c.slotNodes(slot) - if len(nodes) == 0 { - return c.nodes.Random() - } - if len(nodes) == 1 { - return nodes[0], nil - } - randomNodes := rand.Perm(len(nodes)) - for _, idx := range randomNodes { - if node := nodes[idx]; !node.Failing() { - return node, nil - } - } - return nodes[randomNodes[0]], nil -} - -func (c *clusterState) slotNodes(slot int) []*clusterNode { - i := sort.Search(len(c.slots), func(i int) bool { - return c.slots[i].end >= slot - }) - if i >= len(c.slots) { - return nil - } - x := c.slots[i] - if slot >= x.start && slot <= x.end { - return x.nodes - } - return nil -} - -//------------------------------------------------------------------------------ - -type clusterStateHolder struct { - load func(ctx context.Context) (*clusterState, error) - - state atomic.Value - reloading uint32 // atomic -} - -func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { - return &clusterStateHolder{ - load: fn, - } -} - -func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { - state, err := c.load(ctx) - if err != nil { - return nil, err - } - c.state.Store(state) - return state, nil -} - -func (c *clusterStateHolder) LazyReload() { - if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { - return - } - go func() { - defer atomic.StoreUint32(&c.reloading, 0) - - _, err := c.Reload(context.Background()) - if err != nil { - return - } - time.Sleep(200 * time.Millisecond) - }() -} - -func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { - v := c.state.Load() - if v == nil { - return c.Reload(ctx) - } - - state := v.(*clusterState) - if time.Since(state.createdAt) > 10*time.Second { - c.LazyReload() - } - return state, nil -} - -func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { - state, err := c.Reload(ctx) - if err == nil { - return state, nil - } - return c.Get(ctx) -} - 
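> **Editor's note:** `clusterStateHolder.LazyReload` above is a compact example of a CAS-guarded "at most one refresh in flight" pattern: only the caller that flips the atomic flag starts the background reload, and everyone else returns immediately (the real method also sleeps 200 ms before releasing the flag, to rate-limit reloads; that part is omitted here). A stripped-down sketch of the same pattern outside redis; all names are illustrative.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// refresher mirrors the CAS guard in clusterStateHolder.LazyReload.
type refresher struct {
	reloading uint32 // atomic: 1 while a reload goroutine is in flight
	reload    func()
}

func (r *refresher) LazyReload() {
	// Only the caller that flips 0 -> 1 starts the goroutine.
	if !atomic.CompareAndSwapUint32(&r.reloading, 0, 1) {
		return // a reload is already running
	}
	go func() {
		defer atomic.StoreUint32(&r.reloading, 0)
		r.reload()
	}()
}

func main() {
	var reloads int32
	r := &refresher{reload: func() { atomic.AddInt32(&reloads, 1) }}

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); r.LazyReload() }()
	}
	wg.Wait()
	time.Sleep(50 * time.Millisecond) // crude: let background reloads finish; demo only
	fmt.Println("reloads:", atomic.LoadInt32(&reloads)) // a handful at most, never 100
}
```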
-//------------------------------------------------------------------------------ - -type clusterClient struct { - opt *ClusterOptions - nodes *clusterNodes - state *clusterStateHolder //nolint:structcheck - cmdsInfoCache *cmdsInfoCache //nolint:structcheck -} - -// ClusterClient is a Redis Cluster client representing a pool of zero -// or more underlying connections. It's safe for concurrent use by -// multiple goroutines. -type ClusterClient struct { - *clusterClient - cmdable - hooks - ctx context.Context -} - -// NewClusterClient returns a Redis Cluster client as described in -// http://redis.io/topics/cluster-spec. -func NewClusterClient(opt *ClusterOptions) *ClusterClient { - opt.init() - - c := &ClusterClient{ - clusterClient: &clusterClient{ - opt: opt, - nodes: newClusterNodes(opt), - }, - ctx: context.Background(), - } - c.state = newClusterStateHolder(c.loadState) - c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) - c.cmdable = c.Process - - if opt.IdleCheckFrequency > 0 { - go c.reaper(opt.IdleCheckFrequency) - } - - return c -} - -func (c *ClusterClient) Context() context.Context { - return c.ctx -} - -func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { - if ctx == nil { - panic("nil context") - } - clone := *c - clone.cmdable = clone.Process - clone.hooks.lock() - clone.ctx = ctx - return &clone -} - -// Options returns read-only Options that were used to create the client. -func (c *ClusterClient) Options() *ClusterOptions { - return c.opt -} - -// ReloadState reloads cluster state. If available it calls ClusterSlots func -// to get cluster slots information. -func (c *ClusterClient) ReloadState(ctx context.Context) { - c.state.LazyReload() -} - -// Close closes the cluster client, releasing any open resources. -// -// It is rare to Close a ClusterClient, as the ClusterClient is meant -// to be long-lived and shared between many goroutines. -func (c *ClusterClient) Close() error { - return c.nodes.Close() -} - -// Do creates a Cmd from the args and processes the cmd. -func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) - _ = c.Process(ctx, cmd) - return cmd -} - -func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { - return c.hooks.process(ctx, cmd, c.process) -} - -func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - cmdInfo := c.cmdInfo(cmd.Name()) - slot := c.cmdSlot(cmd) - - var node *clusterNode - var ask bool - var lastErr error - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return err - } - } - - if node == nil { - var err error - node, err = c.cmdNode(ctx, cmdInfo, slot) - if err != nil { - return err - } - } - - if ask { - pipe := node.Client.Pipeline() - _ = pipe.Process(ctx, NewCmd(ctx, "asking")) - _ = pipe.Process(ctx, cmd) - _, lastErr = pipe.Exec(ctx) - _ = pipe.Close() - ask = false - } else { - lastErr = node.Client.Process(ctx, cmd) - } - - // If there is no error - we are done. - if lastErr == nil { - return nil - } - if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { - if isReadOnly { - c.state.LazyReload() - } - node = nil - continue - } - - // If slave is loading - pick another node. 
- if c.opt.ReadOnly && isLoadingError(lastErr) { - node.MarkAsFailing() - node = nil - continue - } - - var moved bool - var addr string - moved, ask, addr = isMovedError(lastErr) - if moved || ask { - c.state.LazyReload() - - var err error - node, err = c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - continue - } - - if shouldRetry(lastErr, cmd.readTimeout() == nil) { - // First retry the same node. - if attempt == 0 { - continue - } - - // Second try another node. - node.MarkAsFailing() - node = nil - continue - } - - return lastErr - } - return lastErr -} - -// ForEachMaster concurrently calls the fn on each master node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachMaster( - ctx context.Context, - fn func(ctx context.Context, client *Client) error, -) error { - state, err := c.state.ReloadOrGet(ctx) - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - - for _, master := range state.Masters { - wg.Add(1) - go func(node *clusterNode) { - defer wg.Done() - err := fn(ctx, node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(master) - } - - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// ForEachSlave concurrently calls the fn on each slave node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachSlave( - ctx context.Context, - fn func(ctx context.Context, client *Client) error, -) error { - state, err := c.state.ReloadOrGet(ctx) - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - - for _, slave := range state.Slaves { - wg.Add(1) - go func(node *clusterNode) { - defer wg.Done() - err := fn(ctx, node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - }(slave) - } - - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// ForEachShard concurrently calls the fn on each known node in the cluster. -// It returns the first error if any. -func (c *ClusterClient) ForEachShard( - ctx context.Context, - fn func(ctx context.Context, client *Client) error, -) error { - state, err := c.state.ReloadOrGet(ctx) - if err != nil { - return err - } - - var wg sync.WaitGroup - errCh := make(chan error, 1) - - worker := func(node *clusterNode) { - defer wg.Done() - err := fn(ctx, node.Client) - if err != nil { - select { - case errCh <- err: - default: - } - } - } - - for _, node := range state.Masters { - wg.Add(1) - go worker(node) - } - for _, node := range state.Slaves { - wg.Add(1) - go worker(node) - } - - wg.Wait() - - select { - case err := <-errCh: - return err - default: - return nil - } -} - -// PoolStats returns accumulated connection pool stats. 
-func (c *ClusterClient) PoolStats() *PoolStats { - var acc PoolStats - - state, _ := c.state.Get(context.TODO()) - if state == nil { - return &acc - } - - for _, node := range state.Masters { - s := node.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - - acc.TotalConns += s.TotalConns - acc.IdleConns += s.IdleConns - acc.StaleConns += s.StaleConns - } - - for _, node := range state.Slaves { - s := node.Client.connPool.Stats() - acc.Hits += s.Hits - acc.Misses += s.Misses - acc.Timeouts += s.Timeouts - - acc.TotalConns += s.TotalConns - acc.IdleConns += s.IdleConns - acc.StaleConns += s.StaleConns - } - - return &acc -} - -func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { - if c.opt.ClusterSlots != nil { - slots, err := c.opt.ClusterSlots(ctx) - if err != nil { - return nil, err - } - return newClusterState(c.nodes, slots, "") - } - - addrs, err := c.nodes.Addrs() - if err != nil { - return nil, err - } - - var firstErr error - - for _, idx := range rand.Perm(len(addrs)) { - addr := addrs[idx] - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - if firstErr == nil { - firstErr = err - } - continue - } - - slots, err := node.Client.ClusterSlots(ctx).Result() - if err != nil { - if firstErr == nil { - firstErr = err - } - continue - } - - return newClusterState(c.nodes, slots, node.Client.opt.Addr) - } - - /* - * No node is connectable. It's possible that all nodes' IP has changed. - * Clear activeAddrs to let client be able to re-connect using the initial - * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), - * which might have chance to resolve domain name and get updated IP address. - */ - c.nodes.mu.Lock() - c.nodes.activeAddrs = nil - c.nodes.mu.Unlock() - - return nil, firstErr -} - -// reaper closes idle connections to the cluster. 
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { - ticker := time.NewTicker(idleCheckFrequency) - defer ticker.Stop() - - for range ticker.C { - nodes, err := c.nodes.All() - if err != nil { - break - } - - for _, node := range nodes { - _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() - if err != nil { - internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) - } - } - } -} - -func (c *ClusterClient) Pipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processPipeline, - } - pipe.init() - return &pipe -} - -func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(ctx, fn) -} - -func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processPipeline(ctx, cmds, c._processPipeline) -} - -func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { - cmdsMap := newCmdsMap() - err := c.mapCmdsByNode(ctx, cmdsMap, cmds) - if err != nil { - setCmdsErr(cmds, err) - return err - } - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - setCmdsErr(cmds, err) - return err - } - } - - failedCmds := newCmdsMap() - var wg sync.WaitGroup - - for node, cmds := range cmdsMap.m { - wg.Add(1) - go func(node *clusterNode, cmds []Cmder) { - defer wg.Done() - - err := c._processPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } - }(node, cmds) - } - - wg.Wait() - if len(failedCmds.m) == 0 { - break - } - cmdsMap = failedCmds - } - - return cmdsFirstErr(cmds) -} - -func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { - state, err := c.state.Get(ctx) - if err != nil { - return err - } - - if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - node, err := c.slotReadOnlyNode(state, slot) - if err != nil { - return err - } - cmdsMap.Add(node, cmd) - } - return nil - } - - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - node, err := state.slotMasterNode(slot) - if err != nil { - return err - } - cmdsMap.Add(node, cmd) - } - return nil -} - -func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { - for _, cmd := range cmds { - cmdInfo := c.cmdInfo(cmd.Name()) - if cmdInfo == nil || !cmdInfo.ReadOnly { - return false - } - } - return true -} - -func (c *ClusterClient) _processPipelineNode( - ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, -) error { - return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err - } - - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) - }) - }) - }) -} - -func (c *ClusterClient) pipelineReadCmds( - ctx context.Context, - node *clusterNode, - rd *proto.Reader, - cmds []Cmder, - failedCmds *cmdsMap, -) error { - for _, cmd := range cmds { - err := cmd.readReply(rd) - cmd.SetErr(err) - - if err == nil { - continue - } - - 
if c.checkMovedErr(ctx, cmd, err, failedCmds) { - continue - } - - if c.opt.ReadOnly && isLoadingError(err) { - node.MarkAsFailing() - return err - } - if isRedisError(err) { - continue - } - return err - } - return nil -} - -func (c *ClusterClient) checkMovedErr( - ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, -) bool { - moved, ask, addr := isMovedError(err) - if !moved && !ask { - return false - } - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return false - } - - if moved { - c.state.LazyReload() - failedCmds.Add(node, cmd) - return true - } - - if ask { - failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) - return true - } - - panic("not reached") -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. -func (c *ClusterClient) TxPipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processTxPipeline, - } - pipe.init() - return &pipe -} - -func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(ctx, fn) -} - -func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) -} - -func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { - // Trim multi .. exec. - cmds = cmds[1 : len(cmds)-1] - - state, err := c.state.Get(ctx) - if err != nil { - setCmdsErr(cmds, err) - return err - } - - cmdsMap := c.mapCmdsBySlot(cmds) - for slot, cmds := range cmdsMap { - node, err := state.slotMasterNode(slot) - if err != nil { - setCmdsErr(cmds, err) - continue - } - - cmdsMap := map[*clusterNode][]Cmder{node: cmds} - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - setCmdsErr(cmds, err) - return err - } - } - - failedCmds := newCmdsMap() - var wg sync.WaitGroup - - for node, cmds := range cmdsMap { - wg.Add(1) - go func(node *clusterNode, cmds []Cmder) { - defer wg.Done() - - err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } - }(node, cmds) - } - - wg.Wait() - if len(failedCmds.m) == 0 { - break - } - cmdsMap = failedCmds.m - } - } - - return cmdsFirstErr(cmds) -} - -func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { - cmdsMap := make(map[int][]Cmder) - for _, cmd := range cmds { - slot := c.cmdSlot(cmd) - cmdsMap[slot] = append(cmdsMap[slot], cmd) - } - return cmdsMap -} - -func (c *ClusterClient) _processTxPipelineNode( - ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, -) error { - return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err - } - - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - statusCmd := cmds[0].(*StatusCmd) - // Trim multi and exec. 
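// Editor's note (illustrative sketch, not part of the vendored file):
// TxPipeline wraps each slot's batch in MULTI/EXEC, so keys sharing a hash
// tag land in a single transaction on one master. Assuming the public v8 API:
//
//	_, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
//		// Both keys hash to the same slot thanks to the {acct:42} tag,
//		// so they share one MULTI/EXEC; keys in other slots would get
//		// separate transactions (see mapCmdsBySlot above).
//		pipe.Incr(ctx, "{acct:42}:balance")
//		pipe.Incr(ctx, "{acct:42}:ops")
//		return nil
//	})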
- cmds = cmds[1 : len(cmds)-1] - - err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) - if err != nil { - moved, ask, addr := isMovedError(err) - if moved || ask { - return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) - } - return err - } - - return pipelineReadCmds(rd, cmds) - }) - }) - }) -} - -func (c *ClusterClient) txPipelineReadQueued( - ctx context.Context, - rd *proto.Reader, - statusCmd *StatusCmd, - cmds []Cmder, - failedCmds *cmdsMap, -) error { - // Parse queued replies. - if err := statusCmd.readReply(rd); err != nil { - return err - } - - for _, cmd := range cmds { - err := statusCmd.readReply(rd) - if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { - continue - } - return err - } - - // Parse number of replies. - line, err := rd.ReadLine() - if err != nil { - if err == Nil { - err = TxFailedErr - } - return err - } - - switch line[0] { - case proto.ErrorReply: - return proto.ParseErrorReply(line) - case proto.ArrayReply: - // ok - default: - return fmt.Errorf("redis: expected '*', but got line %q", line) - } - - return nil -} - -func (c *ClusterClient) cmdsMoved( - ctx context.Context, cmds []Cmder, - moved, ask bool, - addr string, - failedCmds *cmdsMap, -) error { - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - - if moved { - c.state.LazyReload() - for _, cmd := range cmds { - failedCmds.Add(node, cmd) - } - return nil - } - - if ask { - for _, cmd := range cmds { - failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) - } - return nil - } - - return nil -} - -func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { - if len(keys) == 0 { - return fmt.Errorf("redis: Watch requires at least one key") - } - - slot := hashtag.Slot(keys[0]) - for _, key := range keys[1:] { - if hashtag.Slot(key) != slot { - err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") - return err - } - } - - node, err := c.slotMasterNode(ctx, slot) - if err != nil { - return err - } - - for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return err - } - } - - err = node.Client.Watch(ctx, fn, keys...) 
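// Editor's note (illustrative sketch, not part of the vendored file): Watch
// rejects keys from different slots (see the check above), so optimistic
// locking across keys needs a shared hash tag. Assuming the public v8 API:
//
//	const key = "{acct:42}:balance"
//	err := rdb.Watch(ctx, func(tx *redis.Tx) error {
//		n, err := tx.Get(ctx, key).Int()
//		if err != nil && err != redis.Nil {
//			return err
//		}
//		// Runs as MULTI/EXEC; returns redis.TxFailedErr if key changed.
//		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
//			pipe.Set(ctx, key, n+1, 0)
//			return nil
//		})
//		return err
//	}, key)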
- if err == nil { - break - } - - moved, ask, addr := isMovedError(err) - if moved || ask { - node, err = c.nodes.GetOrCreate(addr) - if err != nil { - return err - } - continue - } - - if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { - if isReadOnly { - c.state.LazyReload() - } - node, err = c.slotMasterNode(ctx, slot) - if err != nil { - return err - } - continue - } - - if shouldRetry(err, true) { - continue - } - - return err - } - - return err -} - -func (c *ClusterClient) pubSub() *PubSub { - var node *clusterNode - pubsub := &PubSub{ - opt: c.opt.clientOptions(), - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - if node != nil { - panic("node != nil") - } - - var err error - if len(channels) > 0 { - slot := hashtag.Slot(channels[0]) - node, err = c.slotMasterNode(ctx, slot) - } else { - node, err = c.nodes.Random() - } - if err != nil { - return nil, err - } - - cn, err := node.Client.newConn(context.TODO()) - if err != nil { - node = nil - - return nil, err - } - - return cn, nil - }, - closeConn: func(cn *pool.Conn) error { - err := node.Client.connPool.CloseConn(cn) - node = nil - return err - }, - } - pubsub.init() - - return pubsub -} - -// Subscribe subscribes the client to the specified channels. -// Channels can be omitted to create empty subscription. -func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.Subscribe(ctx, channels...) - } - return pubsub -} - -// PSubscribe subscribes the client to the given patterns. -// Patterns can be omitted to create empty subscription. -func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.PSubscribe(ctx, channels...) - } - return pubsub -} - -func (c *ClusterClient) retryBackoff(attempt int) time.Duration { - return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) -} - -func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { - // Try 3 random nodes. 
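// Editor's note (illustrative sketch, not part of the vendored file): the
// pubSub constructor above pins the connection to the node owning the first
// channel's slot. Typical use, assuming the public v8 API:
//
//	pubsub := rdb.Subscribe(ctx, "events")
//	defer pubsub.Close()
//	for msg := range pubsub.Channel() {
//		fmt.Println(msg.Channel, msg.Payload)
//	}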
- const nodeLimit = 3 - - addrs, err := c.nodes.Addrs() - if err != nil { - return nil, err - } - - var firstErr error - - perm := rand.Perm(len(addrs)) - if len(perm) > nodeLimit { - perm = perm[:nodeLimit] - } - - for _, idx := range perm { - addr := addrs[idx] - - node, err := c.nodes.GetOrCreate(addr) - if err != nil { - if firstErr == nil { - firstErr = err - } - continue - } - - info, err := node.Client.Command(ctx).Result() - if err == nil { - return info, nil - } - if firstErr == nil { - firstErr = err - } - } - - if firstErr == nil { - panic("not reached") - } - return nil, firstErr -} - -func (c *ClusterClient) cmdInfo(name string) *CommandInfo { - cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx) - if err != nil { - return nil - } - - info := cmdsInfo[name] - if info == nil { - internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) - } - return info -} - -func (c *ClusterClient) cmdSlot(cmd Cmder) int { - args := cmd.Args() - if args[0] == "cluster" && args[1] == "getkeysinslot" { - return args[2].(int) - } - - cmdInfo := c.cmdInfo(cmd.Name()) - return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) -} - -func cmdSlot(cmd Cmder, pos int) int { - if pos == 0 { - return hashtag.RandomSlot() - } - firstKey := cmd.stringArg(pos) - return hashtag.Slot(firstKey) -} - -func (c *ClusterClient) cmdNode( - ctx context.Context, - cmdInfo *CommandInfo, - slot int, -) (*clusterNode, error) { - state, err := c.state.Get(ctx) - if err != nil { - return nil, err - } - - if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { - return c.slotReadOnlyNode(state, slot) - } - return state.slotMasterNode(slot) -} - -func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { - if c.opt.RouteByLatency { - return state.slotClosestNode(slot) - } - if c.opt.RouteRandomly { - return state.slotRandomNode(slot) - } - return state.slotSlaveNode(slot) -} - -func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) { - state, err := c.state.Get(ctx) - if err != nil { - return nil, err - } - return state.slotMasterNode(slot) -} - -// SlaveForKey gets a client for a replica node to run any command on it. -// This is especially useful if we want to run a particular lua script which has -// only read only commands on the replica. -// This is because other redis commands generally have a flag that indicates that -// they are read only and automatically run on the replica nodes -// if the ClusterOptions.ReadOnly flag is set to true. -func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) { - state, err := c.state.Get(ctx) - if err != nil { - return nil, err - } - slot := hashtag.Slot(key) - node, err := c.slotReadOnlyNode(state, slot) - if err != nil { - return nil, err - } - return node.Client, err -} - -// MasterForKey returns a client to the master node for a particular key.
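// Editor's note (illustrative sketch, not part of the vendored file): the
// slot helpers above, together with SlaveForKey and MasterForKey (the latter
// defined just below), expose per-key routing. Assuming the public v8 API:
//
//	slot, err := rdb.ClusterKeySlot(ctx, "user:1:name").Result() // server-side CLUSTER KEYSLOT
//	replica, err := rdb.SlaveForKey(ctx, "user:1:name")          // read-only node for the key's slot
//	val, err := replica.Get(ctx, "user:1:name").Result()
//	master, err := rdb.MasterForKey(ctx, "user:1:name")
//	err = master.Set(ctx, "user:1:name", "ada", 0).Err()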
-func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) { - slot := hashtag.Slot(key) - node, err := c.slotMasterNode(ctx, slot) - if err != nil { - return nil, err - } - return node.Client, err -} - -func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { - for _, n := range nodes { - if n == node { - return nodes - } - } - return append(nodes, node) -} - -func appendIfNotExists(ss []string, es ...string) []string { -loop: - for _, e := range es { - for _, s := range ss { - if s == e { - continue loop - } - } - ss = append(ss, e) - } - return ss -} - -//------------------------------------------------------------------------------ - -type cmdsMap struct { - mu sync.Mutex - m map[*clusterNode][]Cmder -} - -func newCmdsMap() *cmdsMap { - return &cmdsMap{ - m: make(map[*clusterNode][]Cmder), - } -} - -func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { - m.mu.Lock() - m.m[node] = append(m.m[node], cmds...) - m.mu.Unlock() -} diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go deleted file mode 100644 index 085bce83d..000000000 --- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go +++ /dev/null @@ -1,109 +0,0 @@ -package redis - -import ( - "context" - "sync" - "sync/atomic" -) - -func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { - cmd := NewIntCmd(ctx, "dbsize") - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - var size int64 - err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { - n, err := master.DBSize(ctx).Result() - if err != nil { - return err - } - atomic.AddInt64(&size, n) - return nil - }) - if err != nil { - cmd.SetErr(err) - } else { - cmd.val = size - } - return nil - }) - return cmd -} - -func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { - cmd := NewStringCmd(ctx, "script", "load", script) - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - mu := &sync.Mutex{} - err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { - val, err := shard.ScriptLoad(ctx, script).Result() - if err != nil { - return err - } - - mu.Lock() - if cmd.Val() == "" { - cmd.val = val - } - mu.Unlock() - - return nil - }) - if err != nil { - cmd.SetErr(err) - } - return nil - }) - return cmd -} - -func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "script", "flush") - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { - return shard.ScriptFlush(ctx).Err() - }) - if err != nil { - cmd.SetErr(err) - } - return nil - }) - return cmd -} - -func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { - args := make([]interface{}, 2+len(hashes)) - args[0] = "script" - args[1] = "exists" - for i, hash := range hashes { - args[2+i] = hash - } - cmd := NewBoolSliceCmd(ctx, args...) 
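// Editor's note (illustrative sketch, not part of the vendored file): the
// DBSize override above fans out to every master and sums the counts; the
// same ForEach helpers are available directly. Assuming the public v8 API:
//
//	total, err := rdb.DBSize(ctx).Result() // sum of DBSIZE over all masters
//	if err != nil {
//		return err
//	}
//	_ = total
//	err = rdb.ForEachMaster(ctx, func(ctx context.Context, m *redis.Client) error {
//		return m.Ping(ctx).Err()
//	})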
- - result := make([]bool, len(hashes)) - for i := range result { - result[i] = true - } - - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - mu := &sync.Mutex{} - err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { - val, err := shard.ScriptExists(ctx, hashes...).Result() - if err != nil { - return err - } - - mu.Lock() - for i, v := range val { - result[i] = result[i] && v - } - mu.Unlock() - - return nil - }) - if err != nil { - cmd.SetErr(err) - } else { - cmd.val = result - } - return nil - }) - return cmd -} diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go deleted file mode 100644 index 4bb12a85b..000000000 --- a/vendor/github.com/go-redis/redis/v8/command.go +++ /dev/null @@ -1,3478 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "net" - "strconv" - "time" - - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hscan" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/util" -) - -type Cmder interface { - Name() string - FullName() string - Args() []interface{} - String() string - stringArg(int) string - firstKeyPos() int8 - SetFirstKeyPos(int8) - - readTimeout() *time.Duration - readReply(rd *proto.Reader) error - - SetErr(error) - Err() error -} - -func setCmdsErr(cmds []Cmder, e error) { - for _, cmd := range cmds { - if cmd.Err() == nil { - cmd.SetErr(e) - } - } -} - -func cmdsFirstErr(cmds []Cmder) error { - for _, cmd := range cmds { - if err := cmd.Err(); err != nil { - return err - } - } - return nil -} - -func writeCmds(wr *proto.Writer, cmds []Cmder) error { - for _, cmd := range cmds { - if err := writeCmd(wr, cmd); err != nil { - return err - } - } - return nil -} - -func writeCmd(wr *proto.Writer, cmd Cmder) error { - return wr.WriteArgs(cmd.Args()) -} - -func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { - if pos := cmd.firstKeyPos(); pos != 0 { - return int(pos) - } - - switch cmd.Name() { - case "eval", "evalsha": - if cmd.stringArg(2) != "0" { - return 3 - } - - return 0 - case "publish": - return 1 - case "memory": - // https://github.com/redis/redis/issues/7493 - if cmd.stringArg(1) == "usage" { - return 2 - } - } - - if info != nil { - return int(info.FirstKeyPos) - } - return 0 -} - -func cmdString(cmd Cmder, val interface{}) string { - b := make([]byte, 0, 64) - - for i, arg := range cmd.Args() { - if i > 0 { - b = append(b, ' ') - } - b = internal.AppendArg(b, arg) - } - - if err := cmd.Err(); err != nil { - b = append(b, ": "...) - b = append(b, err.Error()...) - } else if val != nil { - b = append(b, ": "...) - b = internal.AppendArg(b, val) - } - - return internal.String(b) -} - -//------------------------------------------------------------------------------ - -type baseCmd struct { - ctx context.Context - args []interface{} - err error - keyPos int8 - - _readTimeout *time.Duration -} - -var _ Cmder = (*Cmd)(nil) - -func (cmd *baseCmd) Name() string { - if len(cmd.args) == 0 { - return "" - } - // Cmd name must be lower cased. 
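// Editor's note (illustrative sketch, not part of the vendored file):
// ScriptLoad/ScriptExists above run against every shard, so a script loaded
// once can be invoked with EVALSHA regardless of which node owns the key:
//
//	sha, err := rdb.ScriptLoad(ctx, "return redis.call('GET', KEYS[1])").Result()
//	if err != nil {
//		return err
//	}
//	ok, err := rdb.ScriptExists(ctx, sha).Result() // true only if cached on every shard
//	_ = ok
//	v, err := rdb.EvalSha(ctx, sha, []string{"user:1:name"}).Result()
//	_ = v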
- return internal.ToLower(cmd.stringArg(0)) -} - -func (cmd *baseCmd) FullName() string { - switch name := cmd.Name(); name { - case "cluster", "command": - if len(cmd.args) == 1 { - return name - } - if s2, ok := cmd.args[1].(string); ok { - return name + " " + s2 - } - return name - default: - return name - } -} - -func (cmd *baseCmd) Args() []interface{} { - return cmd.args -} - -func (cmd *baseCmd) stringArg(pos int) string { - if pos < 0 || pos >= len(cmd.args) { - return "" - } - arg := cmd.args[pos] - switch v := arg.(type) { - case string: - return v - default: - // TODO: consider using appendArg - return fmt.Sprint(v) - } -} - -func (cmd *baseCmd) firstKeyPos() int8 { - return cmd.keyPos -} - -func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { - cmd.keyPos = keyPos -} - -func (cmd *baseCmd) SetErr(e error) { - cmd.err = e -} - -func (cmd *baseCmd) Err() error { - return cmd.err -} - -func (cmd *baseCmd) readTimeout() *time.Duration { - return cmd._readTimeout -} - -func (cmd *baseCmd) setReadTimeout(d time.Duration) { - cmd._readTimeout = &d -} - -//------------------------------------------------------------------------------ - -type Cmd struct { - baseCmd - - val interface{} -} - -func NewCmd(ctx context.Context, args ...interface{}) *Cmd { - return &Cmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *Cmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *Cmd) SetVal(val interface{}) { - cmd.val = val -} - -func (cmd *Cmd) Val() interface{} { - return cmd.val -} - -func (cmd *Cmd) Result() (interface{}, error) { - return cmd.val, cmd.err -} - -func (cmd *Cmd) Text() (string, error) { - if cmd.err != nil { - return "", cmd.err - } - return toString(cmd.val) -} - -func toString(val interface{}) (string, error) { - switch val := val.(type) { - case string: - return val, nil - default: - err := fmt.Errorf("redis: unexpected type=%T for String", val) - return "", err - } -} - -func (cmd *Cmd) Int() (int, error) { - if cmd.err != nil { - return 0, cmd.err - } - switch val := cmd.val.(type) { - case int64: - return int(val), nil - case string: - return strconv.Atoi(val) - default: - err := fmt.Errorf("redis: unexpected type=%T for Int", val) - return 0, err - } -} - -func (cmd *Cmd) Int64() (int64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return toInt64(cmd.val) -} - -func toInt64(val interface{}) (int64, error) { - switch val := val.(type) { - case int64: - return val, nil - case string: - return strconv.ParseInt(val, 10, 64) - default: - err := fmt.Errorf("redis: unexpected type=%T for Int64", val) - return 0, err - } -} - -func (cmd *Cmd) Uint64() (uint64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return toUint64(cmd.val) -} - -func toUint64(val interface{}) (uint64, error) { - switch val := val.(type) { - case int64: - return uint64(val), nil - case string: - return strconv.ParseUint(val, 10, 64) - default: - err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) - return 0, err - } -} - -func (cmd *Cmd) Float32() (float32, error) { - if cmd.err != nil { - return 0, cmd.err - } - return toFloat32(cmd.val) -} - -func toFloat32(val interface{}) (float32, error) { - switch val := val.(type) { - case int64: - return float32(val), nil - case string: - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil - default: - err := fmt.Errorf("redis: unexpected type=%T for Float32", val) - return 0, err - } -} - -func (cmd *Cmd) Float64() (float64, error) { - if 
cmd.err != nil { - return 0, cmd.err - } - return toFloat64(cmd.val) -} - -func toFloat64(val interface{}) (float64, error) { - switch val := val.(type) { - case int64: - return float64(val), nil - case string: - return strconv.ParseFloat(val, 64) - default: - err := fmt.Errorf("redis: unexpected type=%T for Float64", val) - return 0, err - } -} - -func (cmd *Cmd) Bool() (bool, error) { - if cmd.err != nil { - return false, cmd.err - } - return toBool(cmd.val) -} - -func toBool(val interface{}) (bool, error) { - switch val := val.(type) { - case int64: - return val != 0, nil - case string: - return strconv.ParseBool(val) - default: - err := fmt.Errorf("redis: unexpected type=%T for Bool", val) - return false, err - } -} - -func (cmd *Cmd) Slice() ([]interface{}, error) { - if cmd.err != nil { - return nil, cmd.err - } - switch val := cmd.val.(type) { - case []interface{}: - return val, nil - default: - return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) - } -} - -func (cmd *Cmd) StringSlice() ([]string, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - ss := make([]string, len(slice)) - for i, iface := range slice { - val, err := toString(iface) - if err != nil { - return nil, err - } - ss[i] = val - } - return ss, nil -} - -func (cmd *Cmd) Int64Slice() ([]int64, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - nums := make([]int64, len(slice)) - for i, iface := range slice { - val, err := toInt64(iface) - if err != nil { - return nil, err - } - nums[i] = val - } - return nums, nil -} - -func (cmd *Cmd) Uint64Slice() ([]uint64, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - nums := make([]uint64, len(slice)) - for i, iface := range slice { - val, err := toUint64(iface) - if err != nil { - return nil, err - } - nums[i] = val - } - return nums, nil -} - -func (cmd *Cmd) Float32Slice() ([]float32, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - floats := make([]float32, len(slice)) - for i, iface := range slice { - val, err := toFloat32(iface) - if err != nil { - return nil, err - } - floats[i] = val - } - return floats, nil -} - -func (cmd *Cmd) Float64Slice() ([]float64, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - floats := make([]float64, len(slice)) - for i, iface := range slice { - val, err := toFloat64(iface) - if err != nil { - return nil, err - } - floats[i] = val - } - return floats, nil -} - -func (cmd *Cmd) BoolSlice() ([]bool, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - bools := make([]bool, len(slice)) - for i, iface := range slice { - val, err := toBool(iface) - if err != nil { - return nil, err - } - bools[i] = val - } - return bools, nil -} - -func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadReply(sliceParser) - return err -} - -// sliceParser implements proto.MultiBulkParse. 
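// Editor's note (illustrative sketch, not part of the vendored file): the
// generic Cmd above backs client.Do and converts RESP replies on demand:
//
//	if err := rdb.Do(ctx, "set", "counter", "10").Err(); err != nil {
//		return err
//	}
//	n, err := rdb.Do(ctx, "get", "counter").Int64() // string "10" -> int64 via toInt64
//	s, err := rdb.Do(ctx, "get", "counter").Text()
//	b, err := rdb.Do(ctx, "exists", "counter").Bool() // int64 1 -> true via toBool
//	_, _, _ = n, s, b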
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { - vals := make([]interface{}, n) - for i := 0; i < len(vals); i++ { - v, err := rd.ReadReply(sliceParser) - if err != nil { - if err == Nil { - vals[i] = nil - continue - } - if err, ok := err.(proto.RedisError); ok { - vals[i] = err - continue - } - return nil, err - } - vals[i] = v - } - return vals, nil -} - -//------------------------------------------------------------------------------ - -type SliceCmd struct { - baseCmd - - val []interface{} -} - -var _ Cmder = (*SliceCmd)(nil) - -func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { - return &SliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *SliceCmd) SetVal(val []interface{}) { - cmd.val = val -} - -func (cmd *SliceCmd) Val() []interface{} { - return cmd.val -} - -func (cmd *SliceCmd) Result() ([]interface{}, error) { - return cmd.val, cmd.err -} - -func (cmd *SliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -// Scan scans the results from the map into a destination struct. The map keys -// are matched in the Redis struct fields by the `redis:"field"` tag. -func (cmd *SliceCmd) Scan(dst interface{}) error { - if cmd.err != nil { - return cmd.err - } - - // Pass the list of keys and values. - // Skip the first two args for: HMGET key - var args []interface{} - if cmd.args[0] == "hmget" { - args = cmd.args[2:] - } else { - // Otherwise, it's: MGET field field ... - args = cmd.args[1:] - } - - return hscan.Scan(dst, args, cmd.val) -} - -func (cmd *SliceCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadArrayReply(sliceParser) - if err != nil { - return err - } - cmd.val = v.([]interface{}) - return nil -} - -//------------------------------------------------------------------------------ - -type StatusCmd struct { - baseCmd - - val string -} - -var _ Cmder = (*StatusCmd)(nil) - -func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { - return &StatusCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StatusCmd) SetVal(val string) { - cmd.val = val -} - -func (cmd *StatusCmd) Val() string { - return cmd.val -} - -func (cmd *StatusCmd) Result() (string, error) { - return cmd.val, cmd.err -} - -func (cmd *StatusCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadString() - return err -} - -//------------------------------------------------------------------------------ - -type IntCmd struct { - baseCmd - - val int64 -} - -var _ Cmder = (*IntCmd)(nil) - -func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { - return &IntCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *IntCmd) SetVal(val int64) { - cmd.val = val -} - -func (cmd *IntCmd) Val() int64 { - return cmd.val -} - -func (cmd *IntCmd) Result() (int64, error) { - return cmd.val, cmd.err -} - -func (cmd *IntCmd) Uint64() (uint64, error) { - return uint64(cmd.val), cmd.err -} - -func (cmd *IntCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadIntReply() - return err -} - -//------------------------------------------------------------------------------ - -type IntSliceCmd struct { - baseCmd - - val []int64 -} - -var _ Cmder = (*IntSliceCmd)(nil) - -func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { - return &IntSliceCmd{ - baseCmd: baseCmd{ - ctx: 
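// Editor's note (illustrative sketch, not part of the vendored file):
// SliceCmd.Scan above maps HMGET/MGET replies onto struct fields via the
// `redis:"..."` tags handled by internal/hscan:
//
//	type profile struct {
//		Name string `redis:"name"`
//		Age  int    `redis:"age"`
//	}
//	var p profile
//	if err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&p); err != nil {
//		return err
//	}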
ctx, - args: args, - }, - } -} - -func (cmd *IntSliceCmd) SetVal(val []int64) { - cmd.val = val -} - -func (cmd *IntSliceCmd) Val() []int64 { - return cmd.val -} - -func (cmd *IntSliceCmd) Result() ([]int64, error) { - return cmd.val, cmd.err -} - -func (cmd *IntSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]int64, n) - for i := 0; i < len(cmd.val); i++ { - num, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.val[i] = num - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type DurationCmd struct { - baseCmd - - val time.Duration - precision time.Duration -} - -var _ Cmder = (*DurationCmd)(nil) - -func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { - return &DurationCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - precision: precision, - } -} - -func (cmd *DurationCmd) SetVal(val time.Duration) { - cmd.val = val -} - -func (cmd *DurationCmd) Val() time.Duration { - return cmd.val -} - -func (cmd *DurationCmd) Result() (time.Duration, error) { - return cmd.val, cmd.err -} - -func (cmd *DurationCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *DurationCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadIntReply() - if err != nil { - return err - } - switch n { - // -2 if the key does not exist - // -1 if the key exists but has no associated expire - case -2, -1: - cmd.val = time.Duration(n) - default: - cmd.val = time.Duration(n) * cmd.precision - } - return nil -} - -//------------------------------------------------------------------------------ - -type TimeCmd struct { - baseCmd - - val time.Time -} - -var _ Cmder = (*TimeCmd)(nil) - -func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { - return &TimeCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *TimeCmd) SetVal(val time.Time) { - cmd.val = val -} - -func (cmd *TimeCmd) Val() time.Time { - return cmd.val -} - -func (cmd *TimeCmd) Result() (time.Time, error) { - return cmd.val, cmd.err -} - -func (cmd *TimeCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *TimeCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d elements, expected 2", n) - } - - sec, err := rd.ReadInt() - if err != nil { - return nil, err - } - - microsec, err := rd.ReadInt() - if err != nil { - return nil, err - } - - cmd.val = time.Unix(sec, microsec*1000) - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type BoolCmd struct { - baseCmd - - val bool -} - -var _ Cmder = (*BoolCmd)(nil) - -func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { - return &BoolCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *BoolCmd) SetVal(val bool) { - cmd.val = val -} - -func (cmd *BoolCmd) Val() bool { - return cmd.val -} - -func (cmd *BoolCmd) Result() (bool, error) { - return cmd.val, cmd.err -} - -func (cmd *BoolCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *BoolCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadReply(nil) - // `SET key value NX` returns nil when key already exists. 
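// Editor's note (illustrative sketch, not part of the vendored file):
// DurationCmd above keeps the raw -2/-1 sentinels rather than scaling them,
// which is why TTL results are usually checked before use:
//
//	d, err := rdb.TTL(ctx, "session:1").Result()
//	if err != nil {
//		return err
//	}
//	switch {
//	case d == -2: // key does not exist (raw sentinel, see readReply above)
//	case d == -1: // key exists but has no expiry
//	default: // d is the remaining lifetime, scaled by the command's precision
//	}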
But - // `SETNX key value` returns bool (0/1). So convert nil to bool. - if err == Nil { - cmd.val = false - return nil - } - if err != nil { - return err - } - switch v := v.(type) { - case int64: - cmd.val = v == 1 - return nil - case string: - cmd.val = v == "OK" - return nil - default: - return fmt.Errorf("got %T, wanted int64 or string", v) - } -} - -//------------------------------------------------------------------------------ - -type StringCmd struct { - baseCmd - - val string -} - -var _ Cmder = (*StringCmd)(nil) - -func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { - return &StringCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringCmd) SetVal(val string) { - cmd.val = val -} - -func (cmd *StringCmd) Val() string { - return cmd.val -} - -func (cmd *StringCmd) Result() (string, error) { - return cmd.Val(), cmd.err -} - -func (cmd *StringCmd) Bytes() ([]byte, error) { - return util.StringToBytes(cmd.val), cmd.err -} - -func (cmd *StringCmd) Bool() (bool, error) { - if cmd.err != nil { - return false, cmd.err - } - return strconv.ParseBool(cmd.val) -} - -func (cmd *StringCmd) Int() (int, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.Atoi(cmd.Val()) -} - -func (cmd *StringCmd) Int64() (int64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseInt(cmd.Val(), 10, 64) -} - -func (cmd *StringCmd) Uint64() (uint64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseUint(cmd.Val(), 10, 64) -} - -func (cmd *StringCmd) Float32() (float32, error) { - if cmd.err != nil { - return 0, cmd.err - } - f, err := strconv.ParseFloat(cmd.Val(), 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -func (cmd *StringCmd) Float64() (float64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseFloat(cmd.Val(), 64) -} - -func (cmd *StringCmd) Time() (time.Time, error) { - if cmd.err != nil { - return time.Time{}, cmd.err - } - return time.Parse(time.RFC3339Nano, cmd.Val()) -} - -func (cmd *StringCmd) Scan(val interface{}) error { - if cmd.err != nil { - return cmd.err - } - return proto.Scan([]byte(cmd.val), val) -} - -func (cmd *StringCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadString() - return err -} - -//------------------------------------------------------------------------------ - -type FloatCmd struct { - baseCmd - - val float64 -} - -var _ Cmder = (*FloatCmd)(nil) - -func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { - return &FloatCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *FloatCmd) SetVal(val float64) { - cmd.val = val -} - -func (cmd *FloatCmd) Val() float64 { - return cmd.val -} - -func (cmd *FloatCmd) Result() (float64, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *FloatCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadFloatReply() - return err -} - -//------------------------------------------------------------------------------ - -type FloatSliceCmd struct { - baseCmd - - val []float64 -} - -var _ Cmder = (*FloatSliceCmd)(nil) - -func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { - return &FloatSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *FloatSliceCmd) SetVal(val []float64) { - cmd.val = 
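// Editor's note (illustrative sketch, not part of the vendored file): the
// nil-to-false conversion above is what makes SET ... NX usable as a lock:
//
//	acquired, err := rdb.SetNX(ctx, "lock:job", "worker-1", 10*time.Second).Result()
//	if err != nil {
//		return err
//	}
//	if !acquired {
//		// Key already existed: the server replied nil, converted to false here.
//	}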
val -} - -func (cmd *FloatSliceCmd) Val() []float64 { - return cmd.val -} - -func (cmd *FloatSliceCmd) Result() ([]float64, error) { - return cmd.val, cmd.err -} - -func (cmd *FloatSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]float64, n) - for i := 0; i < len(cmd.val); i++ { - switch num, err := rd.ReadFloatReply(); { - case err == Nil: - cmd.val[i] = 0 - case err != nil: - return nil, err - default: - cmd.val[i] = num - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringSliceCmd struct { - baseCmd - - val []string -} - -var _ Cmder = (*StringSliceCmd)(nil) - -func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { - return &StringSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringSliceCmd) SetVal(val []string) { - cmd.val = val -} - -func (cmd *StringSliceCmd) Val() []string { - return cmd.val -} - -func (cmd *StringSliceCmd) Result() ([]string, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *StringSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { - return proto.ScanSlice(cmd.Val(), container) -} - -func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]string, n) - for i := 0; i < len(cmd.val); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.val[i] = "" - case err != nil: - return nil, err - default: - cmd.val[i] = s - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type BoolSliceCmd struct { - baseCmd - - val []bool -} - -var _ Cmder = (*BoolSliceCmd)(nil) - -func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { - return &BoolSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *BoolSliceCmd) SetVal(val []bool) { - cmd.val = val -} - -func (cmd *BoolSliceCmd) Val() []bool { - return cmd.val -} - -func (cmd *BoolSliceCmd) Result() ([]bool, error) { - return cmd.val, cmd.err -} - -func (cmd *BoolSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]bool, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.val[i] = n == 1 - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringStringMapCmd struct { - baseCmd - - val map[string]string -} - -var _ Cmder = (*StringStringMapCmd)(nil) - -func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd { - return &StringStringMapCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringStringMapCmd) SetVal(val map[string]string) { - cmd.val = val -} - -func (cmd *StringStringMapCmd) Val() map[string]string { - return cmd.val -} - -func (cmd *StringStringMapCmd) Result() (map[string]string, error) { - return cmd.val, cmd.err -} - -func (cmd *StringStringMapCmd) String() string { - 
return cmdString(cmd, cmd.val) -} - -// Scan scans the results from the map into a destination struct. The map keys -// are matched in the Redis struct fields by the `redis:"field"` tag. -func (cmd *StringStringMapCmd) Scan(dest interface{}) error { - if cmd.err != nil { - return cmd.err - } - - strct, err := hscan.Struct(dest) - if err != nil { - return err - } - - for k, v := range cmd.val { - if err := strct.Scan(k, v); err != nil { - return err - } - } - - return nil -} - -func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]string, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - value, err := rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val[key] = value - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringIntMapCmd struct { - baseCmd - - val map[string]int64 -} - -var _ Cmder = (*StringIntMapCmd)(nil) - -func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { - return &StringIntMapCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringIntMapCmd) SetVal(val map[string]int64) { - cmd.val = val -} - -func (cmd *StringIntMapCmd) Val() map[string]int64 { - return cmd.val -} - -func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { - return cmd.val, cmd.err -} - -func (cmd *StringIntMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]int64, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - n, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - cmd.val[key] = n - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringStructMapCmd struct { - baseCmd - - val map[string]struct{} -} - -var _ Cmder = (*StringStructMapCmd)(nil) - -func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { - return &StringStructMapCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { - cmd.val = val -} - -func (cmd *StringStructMapCmd) Val() map[string]struct{} { - return cmd.val -} - -func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { - return cmd.val, cmd.err -} - -func (cmd *StringStructMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]struct{}, n) - for i := int64(0); i < n; i++ { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - cmd.val[key] = struct{}{} - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XMessage struct { - ID string - Values map[string]interface{} -} - -type XMessageSliceCmd struct { - baseCmd - - val []XMessage -} - -var _ Cmder = (*XMessageSliceCmd)(nil) - -func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { - return 
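// Editor's note (illustrative sketch, not part of the vendored file):
// StringStringMapCmd.Scan above feeds each hash field/value pair through
// internal/hscan, so HGETALL can populate a tagged struct directly:
//
//	type device struct {
//		ID   string `redis:"id"`
//		Port int    `redis:"port"`
//	}
//	var d device
//	if err := rdb.HGetAll(ctx, "device:7").Scan(&d); err != nil {
//		return err
//	}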
&XMessageSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { - cmd.val = val -} - -func (cmd *XMessageSliceCmd) Val() []XMessage { - return cmd.val -} - -func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { - return cmd.val, cmd.err -} - -func (cmd *XMessageSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { - var err error - cmd.val, err = readXMessageSlice(rd) - return err -} - -func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - msgs := make([]XMessage, n) - for i := 0; i < n; i++ { - var err error - msgs[i], err = readXMessage(rd) - if err != nil { - return nil, err - } - } - return msgs, nil -} - -func readXMessage(rd *proto.Reader) (XMessage, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return XMessage{}, err - } - if n != 2 { - return XMessage{}, fmt.Errorf("got %d, wanted 2", n) - } - - id, err := rd.ReadString() - if err != nil { - return XMessage{}, err - } - - var values map[string]interface{} - - v, err := rd.ReadArrayReply(stringInterfaceMapParser) - if err != nil { - if err != proto.Nil { - return XMessage{}, err - } - } else { - values = v.(map[string]interface{}) - } - - return XMessage{ - ID: id, - Values: values, - }, nil -} - -// stringInterfaceMapParser implements proto.MultiBulkParse. -func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { - m := make(map[string]interface{}, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - value, err := rd.ReadString() - if err != nil { - return nil, err - } - - m[key] = value - } - return m, nil -} - -//------------------------------------------------------------------------------ - -type XStream struct { - Stream string - Messages []XMessage -} - -type XStreamSliceCmd struct { - baseCmd - - val []XStream -} - -var _ Cmder = (*XStreamSliceCmd)(nil) - -func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { - return &XStreamSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XStreamSliceCmd) SetVal(val []XStream) { - cmd.val = val -} - -func (cmd *XStreamSliceCmd) Val() []XStream { - return cmd.val -} - -func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { - return cmd.val, cmd.err -} - -func (cmd *XStreamSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]XStream, n) - for i := 0; i < len(cmd.val); i++ { - i := i - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - - stream, err := rd.ReadString() - if err != nil { - return nil, err - } - - msgs, err := readXMessageSlice(rd) - if err != nil { - return nil, err - } - - cmd.val[i] = XStream{ - Stream: stream, - Messages: msgs, - } - return nil, nil - }) - if err != nil { - return nil, err - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XPending struct { - Count int64 - Lower string - Higher string - Consumers map[string]int64 -} - -type XPendingCmd struct { - baseCmd - val *XPending -} - -var _ Cmder = 
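// Editor's note (illustrative sketch, not part of the vendored file): the
// XMessage/XStream readers above back the stream commands:
//
//	_, err := rdb.XAdd(ctx, &redis.XAddArgs{
//		Stream: "events",
//		Values: map[string]interface{}{"type": "login", "user": "1"},
//	}).Result()
//	streams, err := rdb.XRead(ctx, &redis.XReadArgs{
//		Streams: []string{"events", "0"}, // stream name followed by last-seen ID
//		Count:   10,
//		Block:   time.Second,
//	}).Result() // []redis.XStream, each carrying its []redis.XMessage
//	_ = streams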
(*XPendingCmd)(nil) - -func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { - return &XPendingCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XPendingCmd) SetVal(val *XPending) { - cmd.val = val -} - -func (cmd *XPendingCmd) Val() *XPending { - return cmd.val -} - -func (cmd *XPendingCmd) Result() (*XPending, error) { - return cmd.val, cmd.err -} - -func (cmd *XPendingCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 4 { - return nil, fmt.Errorf("got %d, wanted 4", n) - } - - count, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - lower, err := rd.ReadString() - if err != nil && err != Nil { - return nil, err - } - - higher, err := rd.ReadString() - if err != nil && err != Nil { - return nil, err - } - - cmd.val = &XPending{ - Count: count, - Lower: lower, - Higher: higher, - } - _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - for i := int64(0); i < n; i++ { - _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - - consumerName, err := rd.ReadString() - if err != nil { - return nil, err - } - - consumerPending, err := rd.ReadInt() - if err != nil { - return nil, err - } - - if cmd.val.Consumers == nil { - cmd.val.Consumers = make(map[string]int64) - } - cmd.val.Consumers[consumerName] = consumerPending - - return nil, nil - }) - if err != nil { - return nil, err - } - } - return nil, nil - }) - if err != nil && err != Nil { - return nil, err - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XPendingExt struct { - ID string - Consumer string - Idle time.Duration - RetryCount int64 -} - -type XPendingExtCmd struct { - baseCmd - val []XPendingExt -} - -var _ Cmder = (*XPendingExtCmd)(nil) - -func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { - return &XPendingExtCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { - cmd.val = val -} - -func (cmd *XPendingExtCmd) Val() []XPendingExt { - return cmd.val -} - -func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { - return cmd.val, cmd.err -} - -func (cmd *XPendingExtCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]XPendingExt, 0, n) - for i := int64(0); i < n; i++ { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 4 { - return nil, fmt.Errorf("got %d, wanted 4", n) - } - - id, err := rd.ReadString() - if err != nil { - return nil, err - } - - consumer, err := rd.ReadString() - if err != nil && err != Nil { - return nil, err - } - - idle, err := rd.ReadIntReply() - if err != nil && err != Nil { - return nil, err - } - - retryCount, err := rd.ReadIntReply() - if err != nil && err != Nil { - return nil, err - } - - cmd.val = append(cmd.val, XPendingExt{ - ID: id, - Consumer: consumer, - Idle: time.Duration(idle) * time.Millisecond, - RetryCount: retryCount, - }) - return nil, nil - }) - if err != nil { - return nil, err - } - } - return nil, nil - }) - return err -} - 
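// Editor's note (illustrative sketch, not part of the vendored file): the
// summary form (XPENDING key group) fills the XPending struct above, while
// the extended form returns per-message idle times and retry counts:
//
//	summary, err := rdb.XPending(ctx, "events", "workers").Result()
//	_ = summary.Count // plus Lower/Higher IDs and per-consumer counts
//	entries, err := rdb.XPendingExt(ctx, &redis.XPendingExtArgs{
//		Stream: "events",
//		Group:  "workers",
//		Start:  "-",
//		End:    "+",
//		Count:  10,
//	}).Result() // []redis.XPendingExt with ID, Consumer, Idle, RetryCount
//	_ = entries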
-//------------------------------------------------------------------------------ - -type XAutoClaimCmd struct { - baseCmd - - start string - val []XMessage -} - -var _ Cmder = (*XAutoClaimCmd)(nil) - -func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { - return &XAutoClaimCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { - cmd.val = val - cmd.start = start -} - -func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { - return cmd.val, cmd.start -} - -func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { - return cmd.val, cmd.start, cmd.err -} - -func (cmd *XAutoClaimCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - var err error - - cmd.start, err = rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val, err = readXMessageSlice(rd) - if err != nil { - return nil, err - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XAutoClaimJustIDCmd struct { - baseCmd - - start string - val []string -} - -var _ Cmder = (*XAutoClaimJustIDCmd)(nil) - -func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { - return &XAutoClaimJustIDCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { - cmd.val = val - cmd.start = start -} - -func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { - return cmd.val, cmd.start -} - -func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { - return cmd.val, cmd.start, cmd.err -} - -func (cmd *XAutoClaimJustIDCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - var err error - - cmd.start, err = rd.ReadString() - if err != nil { - return nil, err - } - - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - cmd.val = make([]string, nn) - for i := 0; i < nn; i++ { - cmd.val[i], err = rd.ReadString() - if err != nil { - return nil, err - } - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XInfoConsumersCmd struct { - baseCmd - val []XInfoConsumer -} - -type XInfoConsumer struct { - Name string - Pending int64 - Idle int64 -} - -var _ Cmder = (*XInfoConsumersCmd)(nil) - -func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { - return &XInfoConsumersCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "consumers", stream, group}, - }, - } -} - -func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { - cmd.val = val -} - -func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { - return cmd.val -} - -func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoConsumersCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { - n, err := 
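// Editor's note (illustrative sketch, not part of the vendored file):
// XAutoClaimCmd below parses the (next-start, messages) pair, which is meant
// to be fed back in until the cursor wraps around to "0-0":
//
//	start := "0-0"
//	for {
//		msgs, next, err := rdb.XAutoClaim(ctx, &redis.XAutoClaimArgs{
//			Stream:   "events",
//			Group:    "workers",
//			Consumer: "worker-1",
//			MinIdle:  30 * time.Second,
//			Start:    start,
//		}).Result()
//		if err != nil {
//			return err
//		}
//		_ = msgs // reclaimed entries
//		if next == "0-0" {
//			break
//		}
//		start = next
//	}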
rd.ReadArrayLen() - if err != nil { - return err - } - - cmd.val = make([]XInfoConsumer, n) - - for i := 0; i < n; i++ { - cmd.val[i], err = readXConsumerInfo(rd) - if err != nil { - return err - } - } - - return nil -} - -func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) { - var consumer XInfoConsumer - - n, err := rd.ReadArrayLen() - if err != nil { - return consumer, err - } - if n != 6 { - return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n) - } - - for i := 0; i < 3; i++ { - key, err := rd.ReadString() - if err != nil { - return consumer, err - } - - val, err := rd.ReadString() - if err != nil { - return consumer, err - } - - switch key { - case "name": - consumer.Name = val - case "pending": - consumer.Pending, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return consumer, err - } - case "idle": - consumer.Idle, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return consumer, err - } - default: - return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) - } - } - - return consumer, nil -} - -//------------------------------------------------------------------------------ - -type XInfoGroupsCmd struct { - baseCmd - val []XInfoGroup -} - -type XInfoGroup struct { - Name string - Consumers int64 - Pending int64 - LastDeliveredID string -} - -var _ Cmder = (*XInfoGroupsCmd)(nil) - -func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { - return &XInfoGroupsCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "groups", stream}, - }, - } -} - -func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { - cmd.val = val -} - -func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { - return cmd.val -} - -func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoGroupsCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadArrayLen() - if err != nil { - return err - } - - cmd.val = make([]XInfoGroup, n) - - for i := 0; i < n; i++ { - cmd.val[i], err = readXGroupInfo(rd) - if err != nil { - return err - } - } - - return nil -} - -func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) { - var group XInfoGroup - - n, err := rd.ReadArrayLen() - if err != nil { - return group, err - } - if n != 8 { - return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n) - } - - for i := 0; i < 4; i++ { - key, err := rd.ReadString() - if err != nil { - return group, err - } - - val, err := rd.ReadString() - if err != nil { - return group, err - } - - switch key { - case "name": - group.Name = val - case "consumers": - group.Consumers, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return group, err - } - case "pending": - group.Pending, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return group, err - } - case "last-delivered-id": - group.LastDeliveredID = val - default: - return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key) - } - } - - return group, nil -} - -//------------------------------------------------------------------------------ - -type XInfoStreamCmd struct { - baseCmd - val *XInfoStream -} - -type XInfoStream struct { - Length int64 - RadixTreeKeys int64 - RadixTreeNodes int64 - Groups int64 - LastGeneratedID string - FirstEntry XMessage - LastEntry XMessage -} - -var _ Cmder = (*XInfoStreamCmd)(nil) - -func NewXInfoStreamCmd(ctx context.Context, stream string) 
*XInfoStreamCmd { - return &XInfoStreamCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "stream", stream}, - }, - } -} - -func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { - cmd.val = val -} - -func (cmd *XInfoStreamCmd) Val() *XInfoStream { - return cmd.val -} - -func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoStreamCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadReply(xStreamInfoParser) - if err != nil { - return err - } - cmd.val = v.(*XInfoStream) - return nil -} - -func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) { - if n != 14 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ - "wanted 14", n) - } - var info XInfoStream - for i := 0; i < 7; i++ { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - switch key { - case "length": - info.Length, err = rd.ReadIntReply() - case "radix-tree-keys": - info.RadixTreeKeys, err = rd.ReadIntReply() - case "radix-tree-nodes": - info.RadixTreeNodes, err = rd.ReadIntReply() - case "groups": - info.Groups, err = rd.ReadIntReply() - case "last-generated-id": - info.LastGeneratedID, err = rd.ReadString() - case "first-entry": - info.FirstEntry, err = readXMessage(rd) - if err == Nil { - err = nil - } - case "last-entry": - info.LastEntry, err = readXMessage(rd) - if err == Nil { - err = nil - } - default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - if err != nil { - return nil, err - } - } - return &info, nil -} - -//------------------------------------------------------------------------------ - -type XInfoStreamFullCmd struct { - baseCmd - val *XInfoStreamFull -} - -type XInfoStreamFull struct { - Length int64 - RadixTreeKeys int64 - RadixTreeNodes int64 - LastGeneratedID string - Entries []XMessage - Groups []XInfoStreamGroup -} - -type XInfoStreamGroup struct { - Name string - LastDeliveredID string - PelCount int64 - Pending []XInfoStreamGroupPending - Consumers []XInfoStreamConsumer -} - -type XInfoStreamGroupPending struct { - ID string - Consumer string - DeliveryTime time.Time - DeliveryCount int64 -} - -type XInfoStreamConsumer struct { - Name string - SeenTime time.Time - PelCount int64 - Pending []XInfoStreamConsumerPending -} - -type XInfoStreamConsumerPending struct { - ID string - DeliveryTime time.Time - DeliveryCount int64 -} - -var _ Cmder = (*XInfoStreamFullCmd)(nil) - -func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { - return &XInfoStreamFullCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { - cmd.val = val -} - -func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { - return cmd.val -} - -func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoStreamFullCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadArrayLen() - if err != nil { - return err - } - if n != 12 { - return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 12", n) - } - - cmd.val = &XInfoStreamFull{} - - for i := 0; i < 6; i++ { - key, err := rd.ReadString() - if err != nil { - return err - } - - switch key { - case "length": - cmd.val.Length, err = rd.ReadIntReply() - case 
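// Editor's note (illustrative sketch, not part of the vendored file): the
// fixed 14-element parse above corresponds to the summary form of XINFO:
//
//	info, err := rdb.XInfoStream(ctx, "events").Result()
//	if err != nil {
//		return err
//	}
//	_ = info.Length          // entry count
//	_ = info.LastGeneratedID // plus radix-tree stats and the group count
//	_ = info.FirstEntry      // first/last entries come back as XMessage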
"radix-tree-keys": - cmd.val.RadixTreeKeys, err = rd.ReadIntReply() - case "radix-tree-nodes": - cmd.val.RadixTreeNodes, err = rd.ReadIntReply() - case "last-generated-id": - cmd.val.LastGeneratedID, err = rd.ReadString() - case "entries": - cmd.val.Entries, err = readXMessageSlice(rd) - case "groups": - cmd.val.Groups, err = readStreamGroups(rd) - default: - return fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - if err != nil { - return err - } - } - return nil -} - -func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - groups := make([]XInfoStreamGroup, 0, n) - for i := 0; i < n; i++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 10 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 10", nn) - } - - group := XInfoStreamGroup{} - - for f := 0; f < 5; f++ { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - switch key { - case "name": - group.Name, err = rd.ReadString() - case "last-delivered-id": - group.LastDeliveredID, err = rd.ReadString() - case "pel-count": - group.PelCount, err = rd.ReadIntReply() - case "pending": - group.Pending, err = readXInfoStreamGroupPending(rd) - case "consumers": - group.Consumers, err = readXInfoStreamConsumers(rd) - default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - - if err != nil { - return nil, err - } - } - - groups = append(groups, group) - } - - return groups, nil -} - -func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - pending := make([]XInfoStreamGroupPending, 0, n) - - for i := 0; i < n; i++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 4 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 4", nn) - } - - p := XInfoStreamGroupPending{} - - p.ID, err = rd.ReadString() - if err != nil { - return nil, err - } - - p.Consumer, err = rd.ReadString() - if err != nil { - return nil, err - } - - delivery, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) - - p.DeliveryCount, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - - pending = append(pending, p) - } - - return pending, nil -} - -func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - consumers := make([]XInfoStreamConsumer, 0, n) - - for i := 0; i < n; i++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 8 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 8", nn) - } - - c := XInfoStreamConsumer{} - - for f := 0; f < 4; f++ { - cKey, err := rd.ReadString() - if err != nil { - return nil, err - } - - switch cKey { - case "name": - c.Name, err = rd.ReadString() - case "seen-time": - seen, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond)) - case "pel-count": - c.PelCount, err = rd.ReadIntReply() - case "pending": - pendingNumber, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) - - for pn := 
0; pn < pendingNumber; pn++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 3 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ - "wanted 3", nn) - } - - p := XInfoStreamConsumerPending{} - - p.ID, err = rd.ReadString() - if err != nil { - return nil, err - } - - delivery, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) - - p.DeliveryCount, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - - c.Pending = append(c.Pending, p) - } - default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", cKey) - } - if err != nil { - return nil, err - } - } - consumers = append(consumers, c) - } - - return consumers, nil -} - -//------------------------------------------------------------------------------ - -type ZSliceCmd struct { - baseCmd - - val []Z -} - -var _ Cmder = (*ZSliceCmd)(nil) - -func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { - return &ZSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *ZSliceCmd) SetVal(val []Z) { - cmd.val = val -} - -func (cmd *ZSliceCmd) Val() []Z { - return cmd.val -} - -func (cmd *ZSliceCmd) Result() ([]Z, error) { - return cmd.val, cmd.err -} - -func (cmd *ZSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]Z, n/2) - for i := 0; i < len(cmd.val); i++ { - member, err := rd.ReadString() - if err != nil { - return nil, err - } - - score, err := rd.ReadFloatReply() - if err != nil { - return nil, err - } - - cmd.val[i] = Z{ - Member: member, - Score: score, - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type ZWithKeyCmd struct { - baseCmd - - val *ZWithKey -} - -var _ Cmder = (*ZWithKeyCmd)(nil) - -func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { - return &ZWithKeyCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { - cmd.val = val -} - -func (cmd *ZWithKeyCmd) Val() *ZWithKey { - return cmd.val -} - -func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *ZWithKeyCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 3 { - return nil, fmt.Errorf("got %d elements, expected 3", n) - } - - cmd.val = &ZWithKey{} - var err error - - cmd.val.Key, err = rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val.Member, err = rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val.Score, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type ScanCmd struct { - baseCmd - - page []string - cursor uint64 - - process cmdable -} - -var _ Cmder = (*ScanCmd)(nil) - -func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { - return &ScanCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - process: process, - } -} - -func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { - 
cmd.page = page - cmd.cursor = cursor -} - -func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { - return cmd.page, cmd.cursor -} - -func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { - return cmd.page, cmd.cursor, cmd.err -} - -func (cmd *ScanCmd) String() string { - return cmdString(cmd, cmd.page) -} - -func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) { - cmd.page, cmd.cursor, err = rd.ReadScanReply() - return err -} - -// Iterator creates a new ScanIterator. -func (cmd *ScanCmd) Iterator() *ScanIterator { - return &ScanIterator{ - cmd: cmd, - } -} - -//------------------------------------------------------------------------------ - -type ClusterNode struct { - ID string - Addr string -} - -type ClusterSlot struct { - Start int - End int - Nodes []ClusterNode -} - -type ClusterSlotsCmd struct { - baseCmd - - val []ClusterSlot -} - -var _ Cmder = (*ClusterSlotsCmd)(nil) - -func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { - return &ClusterSlotsCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { - cmd.val = val -} - -func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { - return cmd.val -} - -func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *ClusterSlotsCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]ClusterSlot, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n < 2 { - err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) - return nil, err - } - - start, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - end, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - nodes := make([]ClusterNode, n-2) - for j := 0; j < len(nodes); j++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n != 2 && n != 3 { - err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) - return nil, err - } - - ip, err := rd.ReadString() - if err != nil { - return nil, err - } - - port, err := rd.ReadString() - if err != nil { - return nil, err - } - - nodes[j].Addr = net.JoinHostPort(ip, port) - - if n == 3 { - id, err := rd.ReadString() - if err != nil { - return nil, err - } - nodes[j].ID = id - } - } - - cmd.val[i] = ClusterSlot{ - Start: int(start), - End: int(end), - Nodes: nodes, - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -// GeoLocation is used with GeoAdd to add geospatial location. -type GeoLocation struct { - Name string - Longitude, Latitude, Dist float64 - GeoHash int64 -} - -// GeoRadiusQuery is used with GeoRadius to query geospatial index. -type GeoRadiusQuery struct { - Radius float64 - // Can be m, km, ft, or mi. Default is km. - Unit string - WithCoord bool - WithDist bool - WithGeoHash bool - Count int - // Can be ASC or DESC. Default is no sort order. 
- Sort string - Store string - StoreDist string -} - -type GeoLocationCmd struct { - baseCmd - - q *GeoRadiusQuery - locations []GeoLocation -} - -var _ Cmder = (*GeoLocationCmd)(nil) - -func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { - return &GeoLocationCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: geoLocationArgs(q, args...), - }, - q: q, - } -} - -func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { - args = append(args, q.Radius) - if q.Unit != "" { - args = append(args, q.Unit) - } else { - args = append(args, "km") - } - if q.WithCoord { - args = append(args, "withcoord") - } - if q.WithDist { - args = append(args, "withdist") - } - if q.WithGeoHash { - args = append(args, "withhash") - } - if q.Count > 0 { - args = append(args, "count", q.Count) - } - if q.Sort != "" { - args = append(args, q.Sort) - } - if q.Store != "" { - args = append(args, "store") - args = append(args, q.Store) - } - if q.StoreDist != "" { - args = append(args, "storedist") - args = append(args, q.StoreDist) - } - return args -} - -func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { - cmd.locations = locations -} - -func (cmd *GeoLocationCmd) Val() []GeoLocation { - return cmd.locations -} - -func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { - return cmd.locations, cmd.err -} - -func (cmd *GeoLocationCmd) String() string { - return cmdString(cmd, cmd.locations) -} - -func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) - if err != nil { - return err - } - cmd.locations = v.([]GeoLocation) - return nil -} - -func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { - return func(rd *proto.Reader, n int64) (interface{}, error) { - locs := make([]GeoLocation, 0, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(newGeoLocationParser(q)) - if err != nil { - return nil, err - } - switch vv := v.(type) { - case string: - locs = append(locs, GeoLocation{ - Name: vv, - }) - case *GeoLocation: - // TODO: avoid copying - locs = append(locs, *vv) - default: - return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) - } - } - return locs, nil - } -} - -func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { - return func(rd *proto.Reader, n int64) (interface{}, error) { - var loc GeoLocation - var err error - - loc.Name, err = rd.ReadString() - if err != nil { - return nil, err - } - if q.WithDist { - loc.Dist, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - } - if q.WithGeoHash { - loc.GeoHash, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - } - if q.WithCoord { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n != 2 { - return nil, fmt.Errorf("got %d coordinates, expected 2", n) - } - - loc.Longitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - loc.Latitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - } - - return &loc, nil - } -} - -//------------------------------------------------------------------------------ - -// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. -type GeoSearchQuery struct { - Member string - - // Latitude and Longitude when using FromLonLat option. - Longitude float64 - Latitude float64 - - // Distance and unit when using ByRadius option. - // Can use m, km, ft, or mi. Default is km. 
- Radius float64 - RadiusUnit string - - // Height, width and unit when using ByBox option. - // Can be m, km, ft, or mi. Default is km. - BoxWidth float64 - BoxHeight float64 - BoxUnit string - - // Can be ASC or DESC. Default is no sort order. - Sort string - Count int - CountAny bool -} - -type GeoSearchLocationQuery struct { - GeoSearchQuery - - WithCoord bool - WithDist bool - WithHash bool -} - -type GeoSearchStoreQuery struct { - GeoSearchQuery - - // When using the StoreDist option, the command stores the items in a - // sorted set populated with their distance from the center of the circle or box, - // as a floating-point number, in the same unit specified for that shape. - StoreDist bool -} - -func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { - args = geoSearchArgs(&q.GeoSearchQuery, args) - - if q.WithCoord { - args = append(args, "withcoord") - } - if q.WithDist { - args = append(args, "withdist") - } - if q.WithHash { - args = append(args, "withhash") - } - - return args -} - -func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { - if q.Member != "" { - args = append(args, "frommember", q.Member) - } else { - args = append(args, "fromlonlat", q.Longitude, q.Latitude) - } - - if q.Radius > 0 { - if q.RadiusUnit == "" { - q.RadiusUnit = "km" - } - args = append(args, "byradius", q.Radius, q.RadiusUnit) - } else { - if q.BoxUnit == "" { - q.BoxUnit = "km" - } - args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) - } - - if q.Sort != "" { - args = append(args, q.Sort) - } - - if q.Count > 0 { - args = append(args, "count", q.Count) - if q.CountAny { - args = append(args, "any") - } - } - - return args -} - -type GeoSearchLocationCmd struct { - baseCmd - - opt *GeoSearchLocationQuery - val []GeoLocation -} - -var _ Cmder = (*GeoSearchLocationCmd)(nil) - -func NewGeoSearchLocationCmd( - ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, -) *GeoSearchLocationCmd { - return &GeoSearchLocationCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - opt: opt, - } -} - -func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { - cmd.val = val -} - -func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { - return cmd.val -} - -func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { - return cmd.val, cmd.err -} - -func (cmd *GeoSearchLocationCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadArrayLen() - if err != nil { - return err - } - - cmd.val = make([]GeoLocation, n) - for i := 0; i < n; i++ { - _, err = rd.ReadArrayLen() - if err != nil { - return err - } - - var loc GeoLocation - - loc.Name, err = rd.ReadString() - if err != nil { - return err - } - if cmd.opt.WithDist { - loc.Dist, err = rd.ReadFloatReply() - if err != nil { - return err - } - } - if cmd.opt.WithHash { - loc.GeoHash, err = rd.ReadIntReply() - if err != nil { - return err - } - } - if cmd.opt.WithCoord { - nn, err := rd.ReadArrayLen() - if err != nil { - return err - } - if nn != 2 { - return fmt.Errorf("got %d coordinates, expected 2", nn) - } - - loc.Longitude, err = rd.ReadFloatReply() - if err != nil { - return err - } - loc.Latitude, err = rd.ReadFloatReply() - if err != nil { - return err - } - } - - cmd.val[i] = loc - } - - return nil -} - -//------------------------------------------------------------------------------ - -type GeoPos struct { - Longitude, Latitude float64 -} - -type GeoPosCmd struct { - 
baseCmd - - val []*GeoPos -} - -var _ Cmder = (*GeoPosCmd)(nil) - -func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { - return &GeoPosCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *GeoPosCmd) SetVal(val []*GeoPos) { - cmd.val = val -} - -func (cmd *GeoPosCmd) Val() []*GeoPos { - return cmd.val -} - -func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *GeoPosCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]*GeoPos, n) - for i := 0; i < len(cmd.val); i++ { - i := i - _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - longitude, err := rd.ReadFloatReply() - if err != nil { - return nil, err - } - - latitude, err := rd.ReadFloatReply() - if err != nil { - return nil, err - } - - cmd.val[i] = &GeoPos{ - Longitude: longitude, - Latitude: latitude, - } - return nil, nil - }) - if err != nil { - if err == Nil { - cmd.val[i] = nil - continue - } - return nil, err - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type CommandInfo struct { - Name string - Arity int8 - Flags []string - ACLFlags []string - FirstKeyPos int8 - LastKeyPos int8 - StepCount int8 - ReadOnly bool -} - -type CommandsInfoCmd struct { - baseCmd - - val map[string]*CommandInfo -} - -var _ Cmder = (*CommandsInfoCmd)(nil) - -func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { - return &CommandsInfoCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { - cmd.val = val -} - -func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { - return cmd.val -} - -func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *CommandsInfoCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]*CommandInfo, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(commandInfoParser) - if err != nil { - return nil, err - } - vv := v.(*CommandInfo) - cmd.val[vv.Name] = vv - } - return nil, nil - }) - return err -} - -func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { - const numArgRedis5 = 6 - const numArgRedis6 = 7 - - switch n { - case numArgRedis5, numArgRedis6: - // continue - default: - return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n) - } - - var cmd CommandInfo - var err error - - cmd.Name, err = rd.ReadString() - if err != nil { - return nil, err - } - - arity, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.Arity = int8(arity) - - _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.Flags = make([]string, n) - for i := 0; i < len(cmd.Flags); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.Flags[i] = "" - case err != nil: - return nil, err - default: - cmd.Flags[i] = s - } - } - return nil, nil - }) - if err != nil { - return nil, err - } - - firstKeyPos, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.FirstKeyPos = int8(firstKeyPos) - - lastKeyPos, err := 
rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.LastKeyPos = int8(lastKeyPos) - - stepCount, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.StepCount = int8(stepCount) - - for _, flag := range cmd.Flags { - if flag == "readonly" { - cmd.ReadOnly = true - break - } - } - - if n == numArgRedis5 { - return &cmd, nil - } - - _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.ACLFlags = make([]string, n) - for i := 0; i < len(cmd.ACLFlags); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.ACLFlags[i] = "" - case err != nil: - return nil, err - default: - cmd.ACLFlags[i] = s - } - } - return nil, nil - }) - if err != nil { - return nil, err - } - - return &cmd, nil -} - -//------------------------------------------------------------------------------ - -type cmdsInfoCache struct { - fn func(ctx context.Context) (map[string]*CommandInfo, error) - - once internal.Once - cmds map[string]*CommandInfo -} - -func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { - return &cmdsInfoCache{ - fn: fn, - } -} - -func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { - err := c.once.Do(func() error { - cmds, err := c.fn(ctx) - if err != nil { - return err - } - - // Extensions have cmd names in upper case. Convert them to lower case. - for k, v := range cmds { - lower := internal.ToLower(k) - if lower != k { - cmds[lower] = v - } - } - - c.cmds = cmds - return nil - }) - return c.cmds, err -} - -//------------------------------------------------------------------------------ - -type SlowLog struct { - ID int64 - Time time.Time - Duration time.Duration - Args []string - // These are also optional fields emitted only by Redis 4.0 or greater: - // https://redis.io/commands/slowlog#output-format - ClientAddr string - ClientName string -} - -type SlowLogCmd struct { - baseCmd - - val []SlowLog -} - -var _ Cmder = (*SlowLogCmd)(nil) - -func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { - return &SlowLogCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *SlowLogCmd) SetVal(val []SlowLog) { - cmd.val = val -} - -func (cmd *SlowLogCmd) Val() []SlowLog { - return cmd.val -} - -func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *SlowLogCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]SlowLog, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n < 4 { - err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n) - return nil, err - } - - id, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - createdAt, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - createdAtTime := time.Unix(createdAt, 0) - - costs, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - costsDuration := time.Duration(costs) * time.Microsecond - - cmdLen, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if cmdLen < 1 { - err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) - return nil, err - } - - cmdString := make([]string, cmdLen) - for i := 0; i < cmdLen; i++ { - cmdString[i], err = rd.ReadString() - if err != nil { 
- return nil, err - } - } - - var address, name string - for i := 4; i < n; i++ { - str, err := rd.ReadString() - if err != nil { - return nil, err - } - if i == 4 { - address = str - } else if i == 5 { - name = str - } - } - - cmd.val[i] = SlowLog{ - ID: id, - Time: createdAtTime, - Duration: costsDuration, - Args: cmdString, - ClientAddr: address, - ClientName: name, - } - } - return nil, nil - }) - return err -} diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go deleted file mode 100644 index bbfe089df..000000000 --- a/vendor/github.com/go-redis/redis/v8/commands.go +++ /dev/null @@ -1,3475 +0,0 @@ -package redis - -import ( - "context" - "errors" - "io" - "time" - - "github.com/go-redis/redis/v8/internal" -) - -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. -// For example: -// -// rdb.Set(ctx, key, value, redis.KeepTTL) -const KeepTTL = -1 - -func usePrecise(dur time.Duration) bool { - return dur < time.Second || dur%time.Second != 0 -} - -func formatMs(ctx context.Context, dur time.Duration) int64 { - if dur > 0 && dur < time.Millisecond { - internal.Logger.Printf( - ctx, - "specified duration is %s, but minimal supported value is %s - truncating to 1ms", - dur, time.Millisecond, - ) - return 1 - } - return int64(dur / time.Millisecond) -} - -func formatSec(ctx context.Context, dur time.Duration) int64 { - if dur > 0 && dur < time.Second { - internal.Logger.Printf( - ctx, - "specified duration is %s, but minimal supported value is %s - truncating to 1s", - dur, time.Second, - ) - return 1 - } - return int64(dur / time.Second) -} - -func appendArgs(dst, src []interface{}) []interface{} { - if len(src) == 1 { - return appendArg(dst, src[0]) - } - - dst = append(dst, src...) - return dst -} - -func appendArg(dst []interface{}, arg interface{}) []interface{} { - switch arg := arg.(type) { - case []string: - for _, s := range arg { - dst = append(dst, s) - } - return dst - case []interface{}: - dst = append(dst, arg...) 
- return dst - case map[string]interface{}: - for k, v := range arg { - dst = append(dst, k, v) - } - return dst - case map[string]string: - for k, v := range arg { - dst = append(dst, k, v) - } - return dst - default: - return append(dst, arg) - } -} - -type Cmdable interface { - Pipeline() Pipeliner - Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) - - TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) - TxPipeline() Pipeliner - - Command(ctx context.Context) *CommandsInfoCmd - ClientGetName(ctx context.Context) *StringCmd - Echo(ctx context.Context, message interface{}) *StringCmd - Ping(ctx context.Context) *StatusCmd - Quit(ctx context.Context) *StatusCmd - Del(ctx context.Context, keys ...string) *IntCmd - Unlink(ctx context.Context, keys ...string) *IntCmd - Dump(ctx context.Context, key string) *StringCmd - Exists(ctx context.Context, keys ...string) *IntCmd - Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd - ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd - ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd - Keys(ctx context.Context, pattern string) *StringSliceCmd - Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd - Move(ctx context.Context, key string, db int) *BoolCmd - ObjectRefCount(ctx context.Context, key string) *IntCmd - ObjectEncoding(ctx context.Context, key string) *StringCmd - ObjectIdleTime(ctx context.Context, key string) *DurationCmd - Persist(ctx context.Context, key string) *BoolCmd - PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd - PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd - PTTL(ctx context.Context, key string) *DurationCmd - RandomKey(ctx context.Context) *StringCmd - Rename(ctx context.Context, key, newkey string) *StatusCmd - RenameNX(ctx context.Context, key, newkey string) *BoolCmd - Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd - RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd - Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd - SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd - SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd - Touch(ctx context.Context, keys ...string) *IntCmd - TTL(ctx context.Context, key string) *DurationCmd - Type(ctx context.Context, key string) *StatusCmd - Append(ctx context.Context, key, value string) *IntCmd - Decr(ctx context.Context, key string) *IntCmd - DecrBy(ctx context.Context, key string, decrement int64) *IntCmd - Get(ctx context.Context, key string) *StringCmd - GetRange(ctx context.Context, key string, start, end int64) *StringCmd - GetSet(ctx context.Context, key string, value interface{}) *StringCmd - GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd - GetDel(ctx context.Context, key string) *StringCmd - Incr(ctx context.Context, key string) *IntCmd - IncrBy(ctx context.Context, key string, value int64) *IntCmd - IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd - MGet(ctx context.Context, keys ...string) *SliceCmd - MSet(ctx context.Context, values ...interface{}) *StatusCmd - 
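A minimal sketch of what the appendArgs flattening means at the call site: MSet, declared just above and documented further down in this file, accepts key/value pairs as discrete arguments, as a single slice, or as a single map. The server address and key names here are illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// Assumed local server; adjust Addr for your environment.
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// appendArgs flattens a lone slice or map argument, so all three
	// calls send the same MSET key1 value1 key2 value2 command.
	rdb.MSet(ctx, "key1", "value1", "key2", "value2")
	rdb.MSet(ctx, []string{"key1", "value1", "key2", "value2"})
	rdb.MSet(ctx, map[string]interface{}{"key1": "value1", "key2": "value2"})

	vals, err := rdb.MGet(ctx, "key1", "key2").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(vals) // [value1 value2]
}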
MSetNX(ctx context.Context, values ...interface{}) *BoolCmd - Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd - SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd - // TODO: rename to SetEx - SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd - SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd - SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd - SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd - StrLen(ctx context.Context, key string) *IntCmd - Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd - - GetBit(ctx context.Context, key string, offset int64) *IntCmd - SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd - BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd - BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd - BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd - BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd - BitOpNot(ctx context.Context, destKey string, key string) *IntCmd - BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd - BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd - - Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd - ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd - SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd - HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd - ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd - - HDel(ctx context.Context, key string, fields ...string) *IntCmd - HExists(ctx context.Context, key, field string) *BoolCmd - HGet(ctx context.Context, key, field string) *StringCmd - HGetAll(ctx context.Context, key string) *StringStringMapCmd - HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd - HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd - HKeys(ctx context.Context, key string) *StringSliceCmd - HLen(ctx context.Context, key string) *IntCmd - HMGet(ctx context.Context, key string, fields ...string) *SliceCmd - HSet(ctx context.Context, key string, values ...interface{}) *IntCmd - HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd - HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd - HVals(ctx context.Context, key string) *StringSliceCmd - HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd - - BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd - BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd - BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd - LIndex(ctx context.Context, key string, index int64) *StringCmd - LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd - LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd - LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd - LLen(ctx context.Context, key string) *IntCmd - LPop(ctx context.Context, key string) *StringCmd - LPopCount(ctx context.Context, key string, count int) 
*StringSliceCmd - LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd - LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd - LPush(ctx context.Context, key string, values ...interface{}) *IntCmd - LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd - LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd - LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd - LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd - LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd - RPop(ctx context.Context, key string) *StringCmd - RPopCount(ctx context.Context, key string, count int) *StringSliceCmd - RPopLPush(ctx context.Context, source, destination string) *StringCmd - RPush(ctx context.Context, key string, values ...interface{}) *IntCmd - RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd - LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd - BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd - - SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd - SCard(ctx context.Context, key string) *IntCmd - SDiff(ctx context.Context, keys ...string) *StringSliceCmd - SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd - SInter(ctx context.Context, keys ...string) *StringSliceCmd - SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd - SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd - SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd - SMembers(ctx context.Context, key string) *StringSliceCmd - SMembersMap(ctx context.Context, key string) *StringStructMapCmd - SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd - SPop(ctx context.Context, key string) *StringCmd - SPopN(ctx context.Context, key string, count int64) *StringSliceCmd - SRandMember(ctx context.Context, key string) *StringCmd - SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd - SRem(ctx context.Context, key string, members ...interface{}) *IntCmd - SUnion(ctx context.Context, keys ...string) *StringSliceCmd - SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd - - XAdd(ctx context.Context, a *XAddArgs) *StringCmd - XDel(ctx context.Context, stream string, ids ...string) *IntCmd - XLen(ctx context.Context, stream string) *IntCmd - XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd - XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd - XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd - XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd - XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd - XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd - XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd - XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd - XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd - XGroupDestroy(ctx context.Context, stream, group string) *IntCmd - XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd - XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd - 
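The stream consumer-group methods declared above compose in the usual create/read/ack cycle; a hedged end-to-end sketch (stream, group, and consumer names are invented, and a reachable server is assumed):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	// Create the stream if needed (MKSTREAM) plus a group starting at ID 0.
	// Re-running this yields a BUSYGROUP error, which a real program would
	// tolerate; the sketch just panics on any error.
	if err := rdb.XGroupCreateMkStream(ctx, "mystream", "mygroup", "0").Err(); err != nil {
		panic(err)
	}
	rdb.XAdd(ctx, &redis.XAddArgs{
		Stream: "mystream",
		Values: map[string]interface{}{"k": "v"},
	})

	// Read one new entry as consumer c1 (">" means undelivered entries),
	// then acknowledge it so it leaves the pending entries list.
	streams, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
		Group:    "mygroup",
		Consumer: "c1",
		Streams:  []string{"mystream", ">"},
		Count:    1,
	}).Result()
	if err != nil {
		panic(err)
	}
	msg := streams[0].Messages[0]
	rdb.XAck(ctx, "mystream", "mygroup", msg.ID)
	fmt.Println("acked", msg.ID, msg.Values)
}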
XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd - XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd - XPending(ctx context.Context, stream, group string) *XPendingCmd - XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd - XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd - XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd - XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd - XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd - - // TODO: XTrim and XTrimApprox remove in v9. - XTrim(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd - XTrimMinID(ctx context.Context, key string, minID string) *IntCmd - XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd - XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd - XInfoStream(ctx context.Context, key string) *XInfoStreamCmd - XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd - XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd - - BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd - BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd - - // TODO: remove - // ZAddCh - // ZIncr - // ZAddNXCh - // ZAddXXCh - // ZIncrNX - // ZIncrXX - // in v9. - // use ZAddArgs and ZAddArgsIncr. - - ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd - ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd - ZIncr(ctx context.Context, key string, member *Z) *FloatCmd - ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd - ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd - ZCard(ctx context.Context, key string) *IntCmd - ZCount(ctx context.Context, key, min, max string) *IntCmd - ZLexCount(ctx context.Context, key, min, max string) *IntCmd - ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd - ZInter(ctx context.Context, store *ZStore) *StringSliceCmd - ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd - ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd - ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd - ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd - ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd - ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd - ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd - ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd - ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd - ZRangeArgsWithScores(ctx 
context.Context, z ZRangeArgs) *ZSliceCmd - ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd - ZRank(ctx context.Context, key, member string) *IntCmd - ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd - ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd - ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd - ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd - ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd - ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd - ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd - ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd - ZRevRank(ctx context.Context, key, member string) *IntCmd - ZScore(ctx context.Context, key, member string) *FloatCmd - ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd - ZUnion(ctx context.Context, store ZStore) *StringSliceCmd - ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd - ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd - ZDiff(ctx context.Context, keys ...string) *StringSliceCmd - ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd - ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd - - PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd - PFCount(ctx context.Context, keys ...string) *IntCmd - PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd - - BgRewriteAOF(ctx context.Context) *StatusCmd - BgSave(ctx context.Context) *StatusCmd - ClientKill(ctx context.Context, ipPort string) *StatusCmd - ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd - ClientList(ctx context.Context) *StringCmd - ClientPause(ctx context.Context, dur time.Duration) *BoolCmd - ClientID(ctx context.Context) *IntCmd - ConfigGet(ctx context.Context, parameter string) *SliceCmd - ConfigResetStat(ctx context.Context) *StatusCmd - ConfigSet(ctx context.Context, parameter, value string) *StatusCmd - ConfigRewrite(ctx context.Context) *StatusCmd - DBSize(ctx context.Context) *IntCmd - FlushAll(ctx context.Context) *StatusCmd - FlushAllAsync(ctx context.Context) *StatusCmd - FlushDB(ctx context.Context) *StatusCmd - FlushDBAsync(ctx context.Context) *StatusCmd - Info(ctx context.Context, section ...string) *StringCmd - LastSave(ctx context.Context) *IntCmd - Save(ctx context.Context) *StatusCmd - Shutdown(ctx context.Context) *StatusCmd - ShutdownSave(ctx context.Context) *StatusCmd - ShutdownNoSave(ctx context.Context) *StatusCmd - SlaveOf(ctx context.Context, host, port string) *StatusCmd - Time(ctx context.Context) *TimeCmd - DebugObject(ctx context.Context, key string) *StringCmd - ReadOnly(ctx context.Context) *StatusCmd - ReadWrite(ctx context.Context) *StatusCmd - MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd - - Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd - EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd - ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd - ScriptFlush(ctx context.Context) *StatusCmd - ScriptKill(ctx context.Context) *StatusCmd - ScriptLoad(ctx context.Context, script string) *StringCmd - - Publish(ctx context.Context, channel string, message interface{}) *IntCmd - 
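Publish above is the sending half of pub/sub; the receiving half is not part of Cmdable because a subscriber takes over its connection. In go-redis v8 it lives on the concrete clients as Subscribe, so the pairing looks roughly like this sketch (channel name and address are assumptions):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	// Subscribe returns a *redis.PubSub bound to its own connection.
	sub := rdb.Subscribe(ctx, "news")
	defer sub.Close()

	// Wait for the subscription confirmation before publishing, so the
	// message below cannot be lost.
	if _, err := sub.Receive(ctx); err != nil {
		panic(err)
	}
	rdb.Publish(ctx, "news", "hello")

	msg, err := sub.ReceiveMessage(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Channel, msg.Payload) // news hello
}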
PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd - PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd - PubSubNumPat(ctx context.Context) *IntCmd - - ClusterSlots(ctx context.Context) *ClusterSlotsCmd - ClusterNodes(ctx context.Context) *StringCmd - ClusterMeet(ctx context.Context, host, port string) *StatusCmd - ClusterForget(ctx context.Context, nodeID string) *StatusCmd - ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd - ClusterResetSoft(ctx context.Context) *StatusCmd - ClusterResetHard(ctx context.Context) *StatusCmd - ClusterInfo(ctx context.Context) *StringCmd - ClusterKeySlot(ctx context.Context, key string) *IntCmd - ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd - ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd - ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd - ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd - ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd - ClusterSaveConfig(ctx context.Context) *StatusCmd - ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd - ClusterFailover(ctx context.Context) *StatusCmd - ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd - ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd - - GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd - GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd - GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd - GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd - GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd - GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd - GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd - GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd - GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd - GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd -} - -type StatefulCmdable interface { - Cmdable - Auth(ctx context.Context, password string) *StatusCmd - AuthACL(ctx context.Context, username, password string) *StatusCmd - Select(ctx context.Context, index int) *StatusCmd - SwapDB(ctx context.Context, index1, index2 int) *StatusCmd - ClientSetName(ctx context.Context, name string) *BoolCmd -} - -var ( - _ Cmdable = (*Client)(nil) - _ Cmdable = (*Tx)(nil) - _ Cmdable = (*Ring)(nil) - _ Cmdable = (*ClusterClient)(nil) -) - -type cmdable func(ctx context.Context, cmd Cmder) error - -type statefulCmdable func(ctx context.Context, cmd Cmder) error - -//------------------------------------------------------------------------------ - -func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd { - cmd := NewStatusCmd(ctx, "auth", password) - _ = c(ctx, cmd) - return cmd -} - -// AuthACL Perform an AUTH command, using the given user and pass. -// Should be used to authenticate the current connection with one of the connections defined in the ACL list -// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system. 
-func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { - cmd := NewStatusCmd(ctx, "auth", username, password) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { - cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { - cmd := NewStatusCmd(ctx, "select", index) - _ = c(ctx, cmd) - return cmd -} - -func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { - cmd := NewStatusCmd(ctx, "swapdb", index1, index2) - _ = c(ctx, cmd) - return cmd -} - -// ClientSetName assigns a name to the connection. -func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { - cmd := NewBoolCmd(ctx, "client", "setname", name) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { - cmd := NewCommandsInfoCmd(ctx, "command") - _ = c(ctx, cmd) - return cmd -} - -// ClientGetName returns the name of the connection. -func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { - cmd := NewStringCmd(ctx, "client", "getname") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { - cmd := NewStringCmd(ctx, "echo", message) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Ping(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "ping") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Quit(_ context.Context) *StatusCmd { - panic("not implemented") -} - -func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "del" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "unlink" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "dump", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "exists" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "") -} - -func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "NX") -} - -func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "XX") -} - -func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "GT") -} - -func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - return c.expire(ctx, key, expiration, "LT") -} - -func (c cmdable) expire( - ctx context.Context, key string, expiration time.Duration, mode string, -) *BoolCmd { - args := make([]interface{}, 3, 4) - args[0] = "expire" - args[1] = key - args[2] = formatSec(ctx, expiration) - if mode != "" { - args = append(args, mode) - } - - cmd := NewBoolCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { - cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "keys", pattern) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "migrate", - host, - port, - key, - db, - formatMs(ctx, timeout), - ) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { - cmd := NewBoolCmd(ctx, "move", key, db) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "object", "refcount", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "object", "encoding", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { - cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { - cmd := NewBoolCmd(ctx, "persist", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { - cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { - cmd := NewBoolCmd( - ctx, - "pexpireat", - key, - tm.UnixNano()/int64(time.Millisecond), - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { - cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RandomKey(ctx context.Context) *StringCmd { - cmd := NewStringCmd(ctx, "randomkey") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { - cmd := NewStatusCmd(ctx, "rename", key, newkey) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { - cmd := NewBoolCmd(ctx, 
"renamenx", key, newkey) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "restore", - key, - formatMs(ctx, ttl), - value, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "restore", - key, - formatMs(ctx, ttl), - value, - "replace", - ) - _ = c(ctx, cmd) - return cmd -} - -type Sort struct { - By string - Offset, Count int64 - Get []string - Order string - Alpha bool -} - -func (sort *Sort) args(key string) []interface{} { - args := []interface{}{"sort", key} - if sort.By != "" { - args = append(args, "by", sort.By) - } - if sort.Offset != 0 || sort.Count != 0 { - args = append(args, "limit", sort.Offset, sort.Count) - } - for _, get := range sort.Get { - args = append(args, "get", get) - } - if sort.Order != "" { - args = append(args, sort.Order) - } - if sort.Alpha { - args = append(args, "alpha") - } - return args -} - -func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, sort.args(key)...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { - args := sort.args(key) - if store != "" { - args = append(args, "store", store) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { - cmd := NewSliceCmd(ctx, sort.args(key)...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, len(keys)+1) - args[0] = "touch" - for i, key := range keys { - args[i+1] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { - cmd := NewDurationCmd(ctx, time.Second, "ttl", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { - cmd := NewStatusCmd(ctx, "type", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { - cmd := NewIntCmd(ctx, "append", key, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "decr", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { - cmd := NewIntCmd(ctx, "decrby", key, decrement) - _ = c(ctx, cmd) - return cmd -} - -// Get Redis `GET key` command. It returns redis.Nil error when key does not exist. -func (c cmdable) Get(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "get", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { - cmd := NewStringCmd(ctx, "getrange", key, start, end) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { - cmd := NewStringCmd(ctx, "getset", key, value) - _ = c(ctx, cmd) - return cmd -} - -// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist). -// Requires Redis >= 6.2.0. 
-func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd { - args := make([]interface{}, 0, 4) - args = append(args, "getex", key) - if expiration > 0 { - if usePrecise(expiration) { - args = append(args, "px", formatMs(ctx, expiration)) - } else { - args = append(args, "ex", formatSec(ctx, expiration)) - } - } else if expiration == 0 { - args = append(args, "persist") - } - - cmd := NewStringCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// GetDel redis-server version >= 6.2.0. -func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "getdel", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "incr", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { - cmd := NewIntCmd(ctx, "incrby", key, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { - cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "mget" - for i, key := range keys { - args[1+i] = key - } - cmd := NewSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// MSet is like Set but accepts multiple values: -// - MSet("key1", "value1", "key2", "value2") -// - MSet([]string{"key1", "value1", "key2", "value2"}) -// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) -func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { - args := make([]interface{}, 1, 1+len(values)) - args[0] = "mset" - args = appendArgs(args, values) - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// MSetNX is like SetNX but accepts multiple values: -// - MSetNX("key1", "value1", "key2", "value2") -// - MSetNX([]string{"key1", "value1", "key2", "value2"}) -// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) -func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { - args := make([]interface{}, 1, 1+len(values)) - args[0] = "msetnx" - args = appendArgs(args, values) - cmd := NewBoolCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// Set Redis `SET key value [expiration]` command. -// Use expiration for `SETEX`-like behavior. -// -// Zero expiration means the key has no expiration time. -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. -func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { - args := make([]interface{}, 3, 5) - args[0] = "set" - args[1] = key - args[2] = value - if expiration > 0 { - if usePrecise(expiration) { - args = append(args, "px", formatMs(ctx, expiration)) - } else { - args = append(args, "ex", formatSec(ctx, expiration)) - } - } else if expiration == KeepTTL { - args = append(args, "keepttl") - } - - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// SetArgs provides arguments for the SetArgs function. -type SetArgs struct { - // Mode can be `NX` or `XX` or empty. - Mode string - - // Zero `TTL` or `Expiration` means that the key has no expiration time. 
- TTL time.Duration - ExpireAt time.Time - - // When Get is true, the command returns the old value stored at key, or nil when key did not exist. - Get bool - - // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, - // otherwise you will receive an error: (error) ERR syntax error. - KeepTTL bool -} - -// SetArgs supports all the options that the SET command supports. -// It is the alternative to the Set function when you want -// to have more control over the options. -func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd { - args := []interface{}{"set", key, value} - - if a.KeepTTL { - args = append(args, "keepttl") - } - - if !a.ExpireAt.IsZero() { - args = append(args, "exat", a.ExpireAt.Unix()) - } - if a.TTL > 0 { - if usePrecise(a.TTL) { - args = append(args, "px", formatMs(ctx, a.TTL)) - } else { - args = append(args, "ex", formatSec(ctx, a.TTL)) - } - } - - if a.Mode != "" { - args = append(args, a.Mode) - } - - if a.Get { - args = append(args, "get") - } - - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// SetEX Redis `SETEX key expiration value` command. -func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { - cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) - _ = c(ctx, cmd) - return cmd -} - -// SetNX Redis `SET key value [expiration] NX` command. -// -// Zero expiration means the key has no expiration time. -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. -func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { - var cmd *BoolCmd - switch expiration { - case 0: - // Use old `SETNX` to support old Redis versions. - cmd = NewBoolCmd(ctx, "setnx", key, value) - case KeepTTL: - cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx") - default: - if usePrecise(expiration) { - cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx") - } else { - cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx") - } - } - - _ = c(ctx, cmd) - return cmd -} - -// SetXX Redis `SET key value [expiration] XX` command. -// -// Zero expiration means the key has no expiration time. -// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, -// otherwise you will receive an error: (error) ERR syntax error. 
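The SET variants above (plain Set, SetNX, SetArgs, KeepTTL) differ mainly in which options reach the server. A hedged usage sketch under the same assumptions (go-redis v8 against a local redis-server >= 6.2; KeepTTL additionally needs >= 6.0; key names are placeholders):

```go
package main

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Plain SET with a 1h TTL; an expiration of 0 would mean "no TTL".
	rdb.Set(ctx, "session", "s1", time.Hour)

	// SET ... KEEPTTL: overwrite the value but keep the existing TTL (Redis >= 6.0).
	rdb.Set(ctx, "session", "s2", redis.KeepTTL)

	// SETNX semantics: only set if the key does not exist yet.
	created, _ := rdb.SetNX(ctx, "lock:job", "owner-1", 30*time.Second).Result()
	_ = created

	// SetArgs exposes the full option set, e.g. XX (only set if the key exists)
	// plus GET (return the old value; redis.Nil if there was none).
	old, err := rdb.SetArgs(ctx, "session", "s3", redis.SetArgs{
		Mode: "XX",
		TTL:  time.Hour,
		Get:  true,
	}).Result()
	if err == redis.Nil {
		// With Get: true, redis.Nil means the key did not exist before.
	}
	_ = old

	// MSet accepts flat pairs, a []string, or a map (map order is not preserved).
	rdb.MSet(ctx, map[string]interface{}{"k1": "v1", "k2": "v2"})
}
```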
-func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { - var cmd *BoolCmd - switch expiration { - case 0: - cmd = NewBoolCmd(ctx, "set", key, value, "xx") - case KeepTTL: - cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx") - default: - if usePrecise(expiration) { - cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx") - } else { - cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx") - } - } - - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { - cmd := NewIntCmd(ctx, "setrange", key, offset, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "strlen", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd { - args := []interface{}{"copy", sourceKey, destKey, "DB", db} - if replace { - args = append(args, "REPLACE") - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { - cmd := NewIntCmd(ctx, "getbit", key, offset) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { - cmd := NewIntCmd( - ctx, - "setbit", - key, - offset, - value, - ) - _ = c(ctx, cmd) - return cmd -} - -type BitCount struct { - Start, End int64 -} - -func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { - args := []interface{}{"bitcount", key} - if bitCount != nil { - args = append( - args, - bitCount.Start, - bitCount.End, - ) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "bitop" - args[1] = op - args[2] = destKey - for i, key := range keys { - args[3+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { - return c.bitOp(ctx, "and", destKey, keys...) -} - -func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { - return c.bitOp(ctx, "or", destKey, keys...) -} - -func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { - return c.bitOp(ctx, "xor", destKey, keys...) -} - -func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { - return c.bitOp(ctx, "not", destKey, key) -} - -func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { - args := make([]interface{}, 3+len(pos)) - args[0] = "bitpos" - args[1] = key - args[2] = bit - switch len(pos) { - case 0: - case 1: - args[3] = pos[0] - case 2: - args[3] = pos[0] - args[4] = pos[1] - default: - panic("too many arguments") - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { - a := make([]interface{}, 0, 2+len(args)) - a = append(a, "bitfield") - a = append(a, key) - a = append(a, args...) - cmd := NewIntSliceCmd(ctx, a...) 
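BitField forwards its arguments to the server verbatim, which is easy to get wrong without seeing one concrete call. A small sketch under the same assumptions (go-redis v8, local server, illustrative key names):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Flip two bits, then count set bits in the first byte (byte range 0..0).
	rdb.SetBit(ctx, "bits", 7, 1)
	rdb.SetBit(ctx, "bits", 10, 1)
	n, _ := rdb.BitCount(ctx, "bits", &redis.BitCount{Start: 0, End: 0}).Result()
	fmt.Println("set bits in byte 0:", n) // 1; bit 10 lives in byte 1

	// BitField passes subcommands through verbatim:
	// BITFIELD counters INCRBY u8 0 10 GET u8 0
	vals, _ := rdb.BitField(ctx, "counters", "incrby", "u8", 0, 10, "get", "u8", 0).Result()
	fmt.Println(vals) // e.g. [10 10]
}
```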
- _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"scan", cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd { - args := []interface{}{"scan", cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - if keyType != "" { - args = append(args, "type", keyType) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"sscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"hscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { - args := []interface{}{"zscan", key, cursor} - if match != "" { - args = append(args, "match", match) - } - if count > 0 { - args = append(args, "count", count) - } - cmd := NewScanCmd(ctx, c, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { - args := make([]interface{}, 2+len(fields)) - args[0] = "hdel" - args[1] = key - for i, field := range fields { - args[2+i] = field - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd { - cmd := NewBoolCmd(ctx, "hexists", key, field) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { - cmd := NewStringCmd(ctx, "hget", key, field) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd { - cmd := NewStringStringMapCmd(ctx, "hgetall", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd { - cmd := NewIntCmd(ctx, "hincrby", key, field, incr) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd { - cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "hkeys", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HLen(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "hlen", key) - _ = c(ctx, cmd) - return cmd -} - -// HMGet returns the values for the specified fields in the hash stored at key. 
-// It returns an interface{} to distinguish between empty string and nil value. -func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd { - args := make([]interface{}, 2+len(fields)) - args[0] = "hmget" - args[1] = key - for i, field := range fields { - args[2+i] = field - } - cmd := NewSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// HSet accepts values in the following formats: -// - HSet("myhash", "key1", "value1", "key2", "value2") -// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) -// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) -// -// Note that it requires Redis v4 for multiple field/value pairs support. -func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "hset" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// HMSet is a deprecated version of HSet left for compatibility with Redis 3. -func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "hmset" - args[1] = key - args = appendArgs(args, values) - cmd := NewBoolCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { - cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "hvals", key) - _ = c(ctx, cmd) - return cmd -} - -// HRandField redis-server version >= 6.2.0. -func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd { - args := make([]interface{}, 0, 4) - - // Although count=0 is meaningless, redis accepts count=0. - args = append(args, "hrandfield", key, count) - if withValues { - args = append(args, "withvalues") - } - - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "blpop" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(ctx, timeout) - cmd := NewStringSliceCmd(ctx, args...) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "brpop" - for i, key := range keys { - args[1+i] = key - } - args[len(keys)+1] = formatSec(ctx, timeout) - cmd := NewStringSliceCmd(ctx, args...)
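The HSet comment above lists three accepted argument shapes, and the HMGet comment explains why the result is []interface{}. A short sketch tying the two together (same assumptions: go-redis v8, local server, illustrative keys):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// All three forms produce the same HSET call (map order is not preserved).
	rdb.HSet(ctx, "user:1", "name", "ada", "lang", "go")
	rdb.HSet(ctx, "user:1", []string{"name", "ada", "lang", "go"})
	rdb.HSet(ctx, "user:1", map[string]interface{}{"name": "ada", "lang": "go"})

	// HMGet returns []interface{}: a nil entry marks a missing field,
	// which a plain empty string could not distinguish.
	vals, _ := rdb.HMGet(ctx, "user:1", "name", "missing").Result()
	for i, v := range vals {
		if v == nil {
			fmt.Println(i, "missing")
		} else {
			fmt.Println(i, v.(string))
		}
	}
}
```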
- cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { - cmd := NewStringCmd( - ctx, - "brpoplpush", - source, - destination, - formatSec(ctx, timeout), - ) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { - cmd := NewStringCmd(ctx, "lindex", key, index) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "llen", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "lpop", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "lpop", key, count) - _ = c(ctx, cmd) - return cmd -} - -type LPosArgs struct { - Rank, MaxLen int64 -} - -func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { - args := []interface{}{"lpos", key, value} - if a.Rank != 0 { - args = append(args, "rank", a.Rank) - } - if a.MaxLen != 0 { - args = append(args, "maxlen", a.MaxLen) - } - - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { - args := []interface{}{"lpos", key, value, "count", count} - if a.Rank != 0 { - args = append(args, "rank", a.Rank) - } - if a.MaxLen != 0 { - args = append(args, "maxlen", a.MaxLen) - } - cmd := NewIntSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "lpush" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "lpushx" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) 
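LPosArgs above only hints at what Rank and MaxLen do on the wire. A hedged sketch (go-redis v8; LPOS itself requires redis-server >= 6.0.6; names are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.RPush(ctx, "queue", "a", "b", "c", "b")

	// LPOS queue b RANK -1: search from the tail; a zero MaxLen scans the whole list.
	idx, err := rdb.LPos(ctx, "queue", "b", redis.LPosArgs{Rank: -1}).Result()
	if err == redis.Nil {
		fmt.Println("not found")
	} else {
		fmt.Println("last b at index", idx) // 3
	}

	// LPosCount returns up to count matches (COUNT 0 would mean "all").
	all, _ := rdb.LPosCount(ctx, "queue", "b", 2, redis.LPosArgs{}).Result()
	fmt.Println(all) // [1 3]
}
```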
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { - cmd := NewStringSliceCmd( - ctx, - "lrange", - key, - start, - stop, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { - cmd := NewIntCmd(ctx, "lrem", key, count, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { - cmd := NewStatusCmd(ctx, "lset", key, index, value) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { - cmd := NewStatusCmd( - ctx, - "ltrim", - key, - start, - stop, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "rpop", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "rpop", key, count) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { - cmd := NewStringCmd(ctx, "rpoplpush", source, destination) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "rpush" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(values)) - args[0] = "rpushx" - args[1] = key - args = appendArgs(args, values) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { - cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BLMove( - ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, -) *StringCmd { - cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "sadd" - args[1] = key - args = appendArgs(args, members) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "scard", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sdiff" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sdiffstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sinter" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sinterstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { - cmd := NewBoolCmd(ctx, "sismember", key, member) - _ = c(ctx, cmd) - return cmd -} - -// SMIsMember Redis `SMISMEMBER key member [member ...]` command. -func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "smismember" - args[1] = key - args = appendArgs(args, members) - cmd := NewBoolSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// SMembers Redis `SMEMBERS key` command output as a slice. -func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "smembers", key) - _ = c(ctx, cmd) - return cmd -} - -// SMembersMap Redis `SMEMBERS key` command output as a map. -func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { - cmd := NewStringStructMapCmd(ctx, "smembers", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { - cmd := NewBoolCmd(ctx, "smove", source, destination, member) - _ = c(ctx, cmd) - return cmd -} - -// SPop Redis `SPOP key` command. -func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "spop", key) - _ = c(ctx, cmd) - return cmd -} - -// SPopN Redis `SPOP key count` command. -func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "spop", key, count) - _ = c(ctx, cmd) - return cmd -} - -// SRandMember Redis `SRANDMEMBER key` command. -func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { - cmd := NewStringCmd(ctx, "srandmember", key) - _ = c(ctx, cmd) - return cmd -} - -// SRandMemberN Redis `SRANDMEMBER key count` command. -func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "srandmember", key, count) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "srem" - args[1] = key - args = appendArgs(args, members) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "sunion" - for i, key := range keys { - args[1+i] = key - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "sunionstore" - args[1] = destination - for i, key := range keys { - args[2+i] = key - } - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -// XAddArgs accepts values in the following formats: -// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"} -// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"} -// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"} -// -// Note that map will not preserve the order of key-value pairs. -// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used. -type XAddArgs struct { - Stream string - NoMkStream bool - MaxLen int64 // MAXLEN N - - // Deprecated: use MaxLen+Approx, remove in v9. - MaxLenApprox int64 // MAXLEN ~ N - - MinID string - // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). - Approx bool - Limit int64 - ID string - Values interface{} -} - -// XAdd: a.Limit depends on a redis-server feature with a known bug; confirm the behavior on your server before using it. -// issue: https://github.com/redis/redis/issues/9046 -func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { - args := make([]interface{}, 0, 11) - args = append(args, "xadd", a.Stream) - if a.NoMkStream { - args = append(args, "nomkstream") - } - switch { - case a.MaxLen > 0: - if a.Approx { - args = append(args, "maxlen", "~", a.MaxLen) - } else { - args = append(args, "maxlen", a.MaxLen) - } - case a.MaxLenApprox > 0: - // TODO remove in v9. - args = append(args, "maxlen", "~", a.MaxLenApprox) - case a.MinID != "": - if a.Approx { - args = append(args, "minid", "~", a.MinID) - } else { - args = append(args, "minid", a.MinID) - } - } - if a.Limit > 0 { - args = append(args, "limit", a.Limit) - } - if a.ID != "" { - args = append(args, a.ID) - } else { - args = append(args, "*") - } - args = appendArg(args, a.Values) - - cmd := NewStringCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { - args := []interface{}{"xdel", stream} - for _, id := range ids { - args = append(args, id) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { - cmd := NewIntCmd(ctx, "xlen", stream) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { - cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count) - _ = c(ctx, cmd) - return cmd -} - -type XReadArgs struct { - Streams []string // list of streams and ids, e.g.
stream1 stream2 id1 id2 - Count int64 - Block time.Duration -} - -func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 6+len(a.Streams)) - args = append(args, "xread") - - keyPos := int8(1) - if a.Count > 0 { - args = append(args, "count") - args = append(args, a.Count) - keyPos += 2 - } - if a.Block >= 0 { - args = append(args, "block") - args = append(args, int64(a.Block/time.Millisecond)) - keyPos += 2 - } - args = append(args, "streams") - keyPos++ - for _, s := range a.Streams { - args = append(args, s) - } - - cmd := NewXStreamSliceCmd(ctx, args...) - if a.Block >= 0 { - cmd.setReadTimeout(a.Block) - } - cmd.SetFirstKeyPos(keyPos) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { - return c.XRead(ctx, &XReadArgs{ - Streams: streams, - Block: -1, - }) -} - -func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { - cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { - cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { - cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { - cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { - cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { - cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) - _ = c(ctx, cmd) - return cmd -} - -type XReadGroupArgs struct { - Group string - Consumer string - Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 - Count int64 - Block time.Duration - NoAck bool -} - -func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 10+len(a.Streams)) - args = append(args, "xreadgroup", "group", a.Group, a.Consumer) - - keyPos := int8(4) - if a.Count > 0 { - args = append(args, "count", a.Count) - keyPos += 2 - } - if a.Block >= 0 { - args = append(args, "block", int64(a.Block/time.Millisecond)) - keyPos += 2 - } - if a.NoAck { - args = append(args, "noack") - keyPos++ - } - args = append(args, "streams") - keyPos++ - for _, s := range a.Streams { - args = append(args, s) - } - - cmd := NewXStreamSliceCmd(ctx, args...) - if a.Block >= 0 { - cmd.setReadTimeout(a.Block) - } - cmd.SetFirstKeyPos(keyPos) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { - args := []interface{}{"xack", stream, group} - for _, id := range ids { - args = append(args, id) - } - cmd := NewIntCmd(ctx, args...) 
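XAddArgs and XReadGroupArgs above carry most of the stream-related option surface. A hedged end-to-end sketch of the producer/consumer-group flow (go-redis v8, local redis-server >= 5.0; stream, group, and consumer names are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Create the group first ("$" = only new entries); MKSTREAM creates the
	// stream if missing. A second run returns BUSYGROUP, which is harmless here.
	rdb.XGroupCreateMkStream(ctx, "events", "workers", "$")

	// XADD with an approximate MAXLEN cap; ID "*" lets the server assign one.
	rdb.XAdd(ctx, &redis.XAddArgs{
		Stream: "events",
		MaxLen: 1000,
		Approx: true,
		Values: map[string]interface{}{"type": "signup", "user": "42"},
	})

	// ">" asks for entries never delivered to this group; Block: -1 skips BLOCK.
	streams, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
		Group:    "workers",
		Consumer: "c1",
		Streams:  []string{"events", ">"},
		Count:    10,
		Block:    -1,
	}).Result()
	if err != nil && err != redis.Nil {
		panic(err)
	}
	for _, s := range streams {
		for _, msg := range s.Messages {
			fmt.Println(msg.ID, msg.Values)
			rdb.XAck(ctx, "events", "workers", msg.ID) // ack after processing
		}
	}
}
```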
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd { - cmd := NewXPendingCmd(ctx, "xpending", stream, group) - _ = c(ctx, cmd) - return cmd -} - -type XPendingExtArgs struct { - Stream string - Group string - Idle time.Duration - Start string - End string - Count int64 - Consumer string -} - -func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd { - args := make([]interface{}, 0, 9) - args = append(args, "xpending", a.Stream, a.Group) - if a.Idle != 0 { - args = append(args, "idle", formatMs(ctx, a.Idle)) - } - args = append(args, a.Start, a.End, a.Count) - if a.Consumer != "" { - args = append(args, a.Consumer) - } - cmd := NewXPendingExtCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -type XAutoClaimArgs struct { - Stream string - Group string - MinIdle time.Duration - Start string - Count int64 - Consumer string -} - -func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd { - args := xAutoClaimArgs(ctx, a) - cmd := NewXAutoClaimCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd { - args := xAutoClaimArgs(ctx, a) - args = append(args, "justid") - cmd := NewXAutoClaimJustIDCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { - args := make([]interface{}, 0, 8) - args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) - if a.Count > 0 { - args = append(args, "count", a.Count) - } - return args -} - -type XClaimArgs struct { - Stream string - Group string - Consumer string - MinIdle time.Duration - Messages []string -} - -func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd { - args := xClaimArgs(a) - cmd := NewXMessageSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd { - args := xClaimArgs(a) - args = append(args, "justid") - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func xClaimArgs(a *XClaimArgs) []interface{} { - args := make([]interface{}, 0, 5+len(a.Messages)) - args = append(args, - "xclaim", - a.Stream, - a.Group, a.Consumer, - int64(a.MinIdle/time.Millisecond)) - for _, id := range a.Messages { - args = append(args, id) - } - return args -} - -// xTrim: if approx is true, the "~" matcher is added; otherwise the default "=" (the redis default) is used. -// example: -// XTRIM key MAXLEN/MINID threshold LIMIT limit. -// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. -// If the redis-server version is lower than 6.2, limit must be set to 0. -func (c cmdable) xTrim( - ctx context.Context, key, strategy string, - approx bool, threshold interface{}, limit int64, -) *IntCmd { - args := make([]interface{}, 0, 7) - args = append(args, "xtrim", key, strategy) - if approx { - args = append(args, "~") - } - args = append(args, threshold) - if limit > 0 { - args = append(args, "limit", limit) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// Deprecated: use XTrimMaxLen, remove in v9. -func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) -} - -// Deprecated: use XTrimMaxLenApprox, remove in v9.
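The xTrim helper and XAutoClaimArgs above describe trimming and pending-entry recovery in the abstract. A hedged sketch of both (go-redis v8; XAUTOCLAIM needs redis-server >= 6.2; the stream/group names assume the earlier consumer-group example):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// XTRIM events MAXLEN ~ 1000: approximate trim, cheap for the server.
	// limit 0 omits LIMIT, which also keeps servers < 6.2 happy.
	rdb.XTrimMaxLenApprox(ctx, "events", 1000, 0)

	// XAUTOCLAIM: take over messages pending longer than MinIdle for consumer c2.
	msgs, next, err := rdb.XAutoClaim(ctx, &redis.XAutoClaimArgs{
		Stream:   "events",
		Group:    "workers",
		Consumer: "c2",
		MinIdle:  5 * time.Minute,
		Start:    "0-0",
		Count:    100,
	}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(msgs), "claimed; resume cursor:", next)
}
```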
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", true, maxLen, 0) -} - -// XTrimMaxLen does not use the `~` matcher, so `limit` cannot be used. -// cmd: XTRIM key MAXLEN maxLen -func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) -} - -// XTrimMaxLenApprox: the LIMIT option depends on a redis-server feature with a known bug; confirm the behavior on your server before using it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit -func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) -} - -// XTrimMinID does not use the `~` matcher, so `limit` cannot be used. -// cmd: XTRIM key MINID minID -func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { - return c.xTrim(ctx, key, "minid", false, minID, 0) -} - -// XTrimMinIDApprox: the LIMIT option depends on a redis-server feature with a known bug; confirm the behavior on your server before using it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MINID ~ minID LIMIT limit -func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { - return c.xTrim(ctx, key, "minid", true, minID, limit) -} - -func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { - cmd := NewXInfoConsumersCmd(ctx, key, group) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd { - cmd := NewXInfoGroupsCmd(ctx, key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd { - cmd := NewXInfoStreamCmd(ctx, key) - _ = c(ctx, cmd) - return cmd -} - -// XInfoStreamFull XINFO STREAM FULL [COUNT count] -// redis-server >= 6.0. -func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd { - args := make([]interface{}, 0, 6) - args = append(args, "xinfo", "stream", key, "full") - if count > 0 { - args = append(args, "count", count) - } - cmd := NewXInfoStreamFullCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -// Z represents a sorted set member. -type Z struct { - Score float64 - Member interface{} -} - -// ZWithKey represents a sorted set member, including the name of the key where it was popped. -type ZWithKey struct { - Z - Key string -} - -// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore. -type ZStore struct { - Keys []string - Weights []float64 - // Can be SUM, MIN or MAX. - Aggregate string -} - -func (z ZStore) len() (n int) { - n = len(z.Keys) - if len(z.Weights) > 0 { - n += 1 + len(z.Weights) - } - if z.Aggregate != "" { - n += 2 - } - return n -} - -func (z ZStore) appendArgs(args []interface{}) []interface{} { - for _, key := range z.Keys { - args = append(args, key) - } - if len(z.Weights) > 0 { - args = append(args, "weights") - for _, weights := range z.Weights { - args = append(args, weights) - } - } - if z.Aggregate != "" { - args = append(args, "aggregate", z.Aggregate) - } - return args -} - -// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
-func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "bzpopmax" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(ctx, timeout) - cmd := NewZWithKeyCmd(ctx, args...) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command. -func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { - args := make([]interface{}, 1+len(keys)+1) - args[0] = "bzpopmin" - for i, key := range keys { - args[1+i] = key - } - args[len(args)-1] = formatSec(ctx, timeout) - cmd := NewZWithKeyCmd(ctx, args...) - cmd.setReadTimeout(timeout) - _ = c(ctx, cmd) - return cmd -} - -// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. -type ZAddArgs struct { - NX bool - XX bool - LT bool - GT bool - Ch bool - Members []Z -} - -func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} { - a := make([]interface{}, 0, 6+2*len(args.Members)) - a = append(a, "zadd", key) - - // The GT, LT and NX options are mutually exclusive. - if args.NX { - a = append(a, "nx") - } else { - if args.XX { - a = append(a, "xx") - } - if args.GT { - a = append(a, "gt") - } else if args.LT { - a = append(a, "lt") - } - } - if args.Ch { - a = append(a, "ch") - } - if incr { - a = append(a, "incr") - } - for _, m := range args.Members { - a = append(a, m.Score) - a = append(a, m.Member) - } - return a -} - -func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd { - cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd { - cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...) - _ = c(ctx, cmd) - return cmd -} - -// TODO: Compatible with v8 api, will be removed in v9. -func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd { - args.Members = make([]Z, len(members)) - for i, m := range members { - args.Members[i] = *m - } - cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) - _ = c(ctx, cmd) - return cmd -} - -// ZAdd Redis `ZADD key score member [score member ...]` command. -func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{}, members...) -} - -// ZAddNX Redis `ZADD key NX score member [score member ...]` command. -func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - NX: true, - }, members...) -} - -// ZAddXX Redis `ZADD key XX score member [score member ...]` command. -func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - XX: true, - }, members...) -} - -// ZAddCh Redis `ZADD key CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - Ch: true, - }, members...) -} - -// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// NX: true, -// Ch: true, -// Members: []Z, -// }) -// remove in v9. 
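The ZAddArgs warning above (GT, LT and NX are mutually exclusive) is easier to see in a concrete call. A hedged sketch (go-redis v8; the GT/LT options need redis-server >= 6.2; key and member names are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.ZAdd(ctx, "scores", &redis.Z{Score: 10, Member: "ada"})

	// GT: only apply updates that raise the score; CH: count changed members
	// (added or updated) in the reply instead of only newly added ones.
	changed, _ := rdb.ZAddArgs(ctx, "scores", redis.ZAddArgs{
		GT: true,
		Ch: true,
		Members: []redis.Z{
			{Score: 15, Member: "ada"}, // 15 > 10, so the update applies
			{Score: 3, Member: "bob"},  // new members are always added
		},
	}).Result()
	fmt.Println("changed:", changed) // 2
}
```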
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - NX: true, - Ch: true, - }, members...) -} - -// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// XX: true, -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - XX: true, - Ch: true, - }, members...) -} - -// ZIncr Redis `ZADD key INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - Members: []Z{*member}, - }) -} - -// ZIncrNX Redis `ZADD key NX INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// NX: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - NX: true, - Members: []Z{*member}, - }) -} - -// ZIncrXX Redis `ZADD key XX INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// XX: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - XX: true, - Members: []Z{*member}, - }) -} - -func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd { - cmd := NewIntCmd(ctx, "zcard", key) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zcount", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zlexcount", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd { - cmd := NewFloatCmd(ctx, "zincrby", key, increment, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zinterstore", destination, len(store.Keys)) - args = store.appendArgs(args) - cmd := NewIntCmd(ctx, args...) - cmd.SetFirstKeyPos(3) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { - args := make([]interface{}, 0, 2+store.len()) - args = append(args, "zinter", len(store.Keys)) - args = store.appendArgs(args) - cmd := NewStringSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zinter", len(store.Keys)) - args = store.appendArgs(args) - args = append(args, "withscores") - cmd := NewZSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { - args := make([]interface{}, 2+len(members)) - args[0] = "zmscore" - args[1] = key - for i, member := range members { - args[2+i] = member - } - cmd := NewFloatSliceCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd { - args := []interface{}{ - "zpopmax", - key, - } - - switch len(count) { - case 0: - break - case 1: - args = append(args, count[0]) - default: - panic("too many arguments") - } - - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd { - args := []interface{}{ - "zpopmin", - key, - } - - switch len(count) { - case 0: - break - case 1: - args = append(args, count[0]) - default: - panic("too many arguments") - } - - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// ZRangeArgs contains all the options of the ZRange command. -// In redis-server versions >= 6.2.0, it can replace these commands: -// ZREVRANGE, -// ZRANGEBYSCORE, -// ZREVRANGEBYSCORE, -// ZRANGEBYLEX, -// ZREVRANGEBYLEX. -// Please pay attention to your redis-server version. -// -// The Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. -type ZRangeArgs struct { - Key string - - // When the ByScore option is provided, an open interval (exclusive) can be set. - // By default, the score intervals specified by Start and Stop are closed (inclusive). - // It is similar to the deprecated (6.2.0+) ZRangeByScore command. - // For example: - // ZRangeArgs{ - // Key: "example-key", - // Start: "(3", - // Stop: 8, - // ByScore: true, - // } - // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8). - // - // For the ByLex option, it is similar to the deprecated (6.2.0+) ZRangeByLex command. - // You can set the Start and Stop options as follows: - // ZRangeArgs{ - // Key: "example-key", - // Start: "[abc", - // Stop: "(def", - // ByLex: true, - // } - // cmd: "ZRange example-key [abc (def ByLex" - // - // For normal cases (ByScore==false && ByLex==false), Start and Stop should be set to the index range (int). - // You can read the documentation for more information: https://redis.io/commands/zrange - Start interface{} - Stop interface{} - - // The ByScore and ByLex options are mutually exclusive. - ByScore bool - ByLex bool - - Rev bool - - // limit offset count. - Offset int64 - Count int64 -} - -func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} { - // For Rev+ByScore/ByLex, we need to adjust the position of Start and Stop. - if z.Rev && (z.ByScore || z.ByLex) { - args = append(args, z.Key, z.Stop, z.Start) - } else { - args = append(args, z.Key, z.Start, z.Stop) - } - - if z.ByScore { - args = append(args, "byscore") - } else if z.ByLex { - args = append(args, "bylex") - } - if z.Rev { - args = append(args, "rev") - } - if z.Offset != 0 || z.Count != 0 { - args = append(args, "limit", z.Offset, z.Count) - } - return args -} - -func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd { - args := make([]interface{}, 0, 9) - args = append(args, "zrange") - args = z.appendArgs(args) - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd { - args := make([]interface{}, 0, 10) - args = append(args, "zrange") - args = z.appendArgs(args) - args = append(args, "withscores") - cmd := NewZSliceCmd(ctx, args...)
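The ZRangeArgs comment above already gives the raw command forms; for completeness, a hedged sketch of the same calls through the client (go-redis v8, redis-server >= 6.2; keys and scores are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.ZAdd(ctx, "zs",
		&redis.Z{Score: 1, Member: "a"},
		&redis.Z{Score: 4, Member: "b"},
		&redis.Z{Score: 8, Member: "c"},
	)

	// ZRANGE zs (3 8 BYSCORE: members with 3 < score <= 8.
	byScore, _ := rdb.ZRangeArgs(ctx, redis.ZRangeArgs{
		Key:     "zs",
		Start:   "(3",
		Stop:    8,
		ByScore: true,
	}).Result()
	fmt.Println(byScore) // [b c]

	// Rev with ByScore swaps Start and Stop on the wire, as appendArgs above shows.
	rev, _ := rdb.ZRangeArgs(ctx, redis.ZRangeArgs{
		Key:     "zs",
		Start:   "(3",
		Stop:    8,
		ByScore: true,
		Rev:     true,
	}).Result()
	fmt.Println(rev) // [c b]
}
```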
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { - return c.ZRangeArgs(ctx, ZRangeArgs{ - Key: key, - Start: start, - Stop: stop, - }) -} - -func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { - return c.ZRangeArgsWithScores(ctx, ZRangeArgs{ - Key: key, - Start: start, - Stop: stop, - }) -} - -type ZRangeBy struct { - Min, Max string - Offset, Count int64 -} - -func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd { - args := []interface{}{zcmd, key, opt.Min, opt.Max} - if withScores { - args = append(args, "withscores") - } - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) -} - -func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRangeBy(ctx, "zrangebylex", key, opt, false) -} - -func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { - args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd { - args := make([]interface{}, 0, 10) - args = append(args, "zrangestore", dst) - args = z.appendArgs(args) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { - cmd := NewIntCmd(ctx, "zrank", key, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(members)) - args[0] = "zrem" - args[1] = key - args = appendArgs(args, members) - cmd := NewIntCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd { - cmd := NewIntCmd( - ctx, - "zremrangebyrank", - key, - start, - stop, - ) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd { - cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { - cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd { - args := []interface{}{zcmd, key, opt.Max, opt.Min} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) -} - -func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { - return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) -} - -func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { - args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} - if opt.Offset != 0 || opt.Count != 0 { - args = append( - args, - "limit", - opt.Offset, - opt.Count, - ) - } - cmd := NewZSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { - cmd := NewIntCmd(ctx, "zrevrank", key, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { - cmd := NewFloatCmd(ctx, "zscore", key, member) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { - args := make([]interface{}, 0, 2+store.len()) - args = append(args, "zunion", len(store.Keys)) - args = store.appendArgs(args) - cmd := NewStringSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zunion", len(store.Keys)) - args = store.appendArgs(args) - args = append(args, "withscores") - cmd := NewZSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd { - args := make([]interface{}, 0, 3+store.len()) - args = append(args, "zunionstore", dest, len(store.Keys)) - args = store.appendArgs(args) - cmd := NewIntCmd(ctx, args...) - cmd.SetFirstKeyPos(3) - _ = c(ctx, cmd) - return cmd -} - -// ZRandMember redis-server version >= 6.2.0. 
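ZStore (defined earlier, used by ZUnion/ZUnionStore above) bundles keys, per-key weights, and the aggregate mode. A hedged sketch of how the pieces combine (go-redis v8; names are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.ZAdd(ctx, "z1", &redis.Z{Score: 1, Member: "a"})
	rdb.ZAdd(ctx, "z2", &redis.Z{Score: 10, Member: "a"})

	// ZUNIONSTORE dest 2 z1 z2 WEIGHTS 2 1 AGGREGATE MAX
	// Weights are applied per source key before scores are aggregated.
	n, _ := rdb.ZUnionStore(ctx, "dest", &redis.ZStore{
		Keys:      []string{"z1", "z2"},
		Weights:   []float64{2, 1},
		Aggregate: "MAX",
	}).Result()
	fmt.Println(n, "members stored") // 1; a's score is max(1*2, 10*1) = 10
}
```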
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd { - args := make([]interface{}, 0, 4) - - // Although count=0 is meaningless, redis accepts count=0. - args = append(args, "zrandmember", key, count) - if withScores { - args = append(args, "withscores") - } - - cmd := NewStringSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// ZDiff redis-server version >= 6.2.0. -func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "zdiff" - args[1] = len(keys) - for i, key := range keys { - args[i+2] = key - } - - cmd := NewStringSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -// ZDiffWithScores redis-server version >= 6.2.0. -func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd { - args := make([]interface{}, 3+len(keys)) - args[0] = "zdiff" - args[1] = len(keys) - for i, key := range keys { - args[i+2] = key - } - args[len(keys)+2] = "withscores" - - cmd := NewZSliceCmd(ctx, args...) - cmd.SetFirstKeyPos(2) - _ = c(ctx, cmd) - return cmd -} - -// ZDiffStore redis-server version >=6.2.0. -func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { - args := make([]interface{}, 0, 3+len(keys)) - args = append(args, "zdiffstore", destination, len(keys)) - for _, key := range keys { - args = append(args, key) - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd { - args := make([]interface{}, 2, 2+len(els)) - args[0] = "pfadd" - args[1] = key - args = appendArgs(args, els) - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd { - args := make([]interface{}, 1+len(keys)) - args[0] = "pfcount" - for i, key := range keys { - args[1+i] = key - } - cmd := NewIntCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd { - args := make([]interface{}, 2+len(keys)) - args[0] = "pfmerge" - args[1] = dest - for i, key := range keys { - args[2+i] = key - } - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -//------------------------------------------------------------------------------ - -func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "bgrewriteaof") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) BgSave(ctx context.Context) *StatusCmd { - cmd := NewStatusCmd(ctx, "bgsave") - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { - cmd := NewStatusCmd(ctx, "client", "kill", ipPort) - _ = c(ctx, cmd) - return cmd -} - -// ClientKillByFilter is the new style syntax, while ClientKill is the old one. -// -// CLIENT KILL <option> [value] ... <option> [value]

+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+	See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+	* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+	* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+	* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+	* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+	* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+	* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+	* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+	* snappy package removed. Upstream added as dependency.
+	* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+	* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+	* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+ +
+	See changes to v1.11.x
+
+* Mar 26, 2021 (v1.11.13)
+	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+	* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+	* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+	* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+	* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+	* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+	* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+	* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+	* s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+	* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+	* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+	* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+	* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+	* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+	* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+	* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+	* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+	* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+	* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+	* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+	* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
+	* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
+
+* Nov 15, 2020 (v1.11.3)
+	* inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
+	* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
+
+* Oct 11, 2020 (v1.11.2)
+	* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
+
+* Oct 1, 2020 (v1.11.1)
+	* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
+
+* Sept 8, 2020 (v1.11.0)
+	* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
+	* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
+	* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+
+ +
+	See changes to v1.10.x
+
+* July 8, 2020 (v1.10.11)
+	* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
+	* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
+
+* June 23, 2020 (v1.10.10)
+	* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
+
+* June 16, 2020 (v1.10.9):
+	* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
+	* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
+	* Fuzzit tests removed. The service has been purchased and is no longer available.
+
+* June 5, 2020 (v1.10.8):
+	* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
+
+* June 1, 2020 (v1.10.7):
+	* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
+	* Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
+	* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
+
+* May 21, 2020: (v1.10.6)
+	* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
+	* zstd: Stricter decompression checks.
+
+* April 12, 2020: (v1.10.5)
+	* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
+
+* Apr 8, 2020: (v1.10.4)
+	* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
+* Mar 11, 2020: (v1.10.3)
+	* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
+	* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
+	* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
+	* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
+	* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
+
+* Feb 27, 2020: (v1.10.2)
+	* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
+	* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
+
+* Feb 18, 2020: (v1.10.1)
+	* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
+	* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
+	* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
+
+* Feb 4, 2020: (v1.10.0)
+	* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
+	* Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
+	* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
+	* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
+
+ +
+	See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Fixed regression in v1.9.5 causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Fewer allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them (a short usage sketch follows at the end of this section):
+
+| old import         | new import                               | Documentation
+|--------------------|------------------------------------------|--------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`     | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`     | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip`      | `github.com/klauspost/compress/zip`      | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate`   | `github.com/klauspost/compress/flate`    | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. The stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter)
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
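+
+As referenced above, here is a minimal sketch of the drop-in usage. Only the import path differs from the standard library; `gzip.NewWriterLevel` and `gzip.BestSpeed` mirror `compress/gzip`:
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
+)
+
+func main() {
+	var buf bytes.Buffer
+	w, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
+	if err != nil {
+		panic(err)
+	}
+	if _, err := w.Write(bytes.Repeat([]byte("compressible "), 50)); err != nil {
+		panic(err)
+	}
+	// Close flushes pending data and writes the gzip footer.
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	fmt.Println(buf.Len(), "compressed bytes")
+}
+```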
+
+# Performance Update 2018
+
+It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 give no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, they are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the minimum number of bits required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 000000000..ea7324da6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples), and the short sketch below.
+
+# Performance
+
+A lot of factors affect speed. Block sizes and the compressibility of the material are primary factors.
+All compression functions are currently only running on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
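+
+To make the Usage notes above concrete, here is a minimal round-trip sketch. It assumes only the `Compress`, `Decompress`, `Scratch`, `ErrIncompressible` and `ErrUseRLE` identifiers described in this README; the caller tracks block sizes and handles the sentinel errors:
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("abcdeabcde"), 100)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case nil:
+		// Compressed data returned; it shares the scratch output buffer.
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		return // expected under normal operation; store raw or as RLE instead
+	default:
+		panic(err)
+	}
+
+	// Copy before reusing the scratch: the Out buffer is shared between
+	// compression and decompression, as noted above.
+	comp = append([]byte(nil), comp...)
+	s.Out = nil
+
+	out, err := fse.Decompress(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(bytes.Equal(in, out)) // true
+}
+```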
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s decompression speed.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 000000000..f65eb3909
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 000000000..43e463611 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 000000000..abade2d60 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 000000000..dac97e58a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,682 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if charnum > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no way guaranteed that corrupt data will
+// return an error.
+// It is up to the caller to verify integrity of the returned data.
+// Use a predefined Scratch to set maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 000000000..535cbadfd --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // no bits have prob > 50%.
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram allows populating the histogram and skipping that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 000000000..b3d262958
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 000000000..8b6e5c663
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders)
+is a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
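+
+As a quick illustration, here is a minimal round-trip sketch (an editorial example, not part of the upstream
+package; it assumes the `Compress1X`, `ReadTable` and `Scratch.Decompress1X` signatures from the godoc links
+in this document, and shortens error handling for brevity):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("compressible sample data. "), 100)
+
+	var s huff0.Scratch
+	// comp contains the table followed by the entropy coded data.
+	comp, _, err := huff0.Compress1X(in, &s)
+	switch err {
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		// Expected for some inputs: store the block in another form instead.
+		return
+	case nil:
+	default:
+		panic(err)
+	}
+
+	// ReadTable consumes the table portion and returns the data portion.
+	d, data, err := huff0.ReadTable(comp, nil)
+	if err != nil {
+		panic(err)
+	}
+	out, err := d.Decompress1X(data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(bytes.Equal(in, out)) // true
+}
+```
+
+Passing `nil` for the `Scratch` lets `ReadTable` allocate one; re-using the returned scratch for later calls
+avoids allocations, subject to the buffer re-use caveat below.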
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 000000000..e36d9742f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.advance(8 - uint8(highBit32(uint32(v))))
+	return nil
+}
+
+// peekByteFast requires that at least one byte is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+	got := uint8(b.value >> 56)
+	return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+	b.bitsRead += n
+	b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+
+	// 2 bounds checks.
+	v := b.in[b.off-4 : b.off]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value |= uint64(low) << (b.bitsRead - 32)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4 : b.off]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value |= uint64(low) << (b.bitsRead - 32)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+func (b *bitReaderBytes) remaining() uint {
+	return b.off*8 + uint(64-b.bitsRead)
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+	// Release reference.
+	b.in = nil
+	if b.remaining() > 0 {
+		return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+	}
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 000000000..aed2347ce --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,111 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 000000000..4dcab8d23 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,44 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. 
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 000000000..4ee4fa18d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,749 @@
+package huff0
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similar to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
+		const parallelThreshold = 8 << 10
+		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
+			return compress(in, s, s.compress4X)
+		}
+		return compress(in, s, s.compress4Xp)
+	}
+	return compress(in, s, s.compress4X)
+}
+
+func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
+	// Nuke previous table if we cannot reuse anyway.
+	if s.Reuse == ReusePolicyNone {
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	var canReuse = false
+	if maxCount == 0 {
+		maxCount, canReuse = s.countSimple(in)
+	} else {
+		canReuse = s.canUseTable(s.prevTable)
+	}
+
+	// We want the output size to be less than this:
+	wantSize := len(in)
+	if s.WantLogLess > 0 {
+		wantSize -= wantSize >> s.WantLogLess
+	}
+
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount >= len(in) {
+		if maxCount > len(in) {
+			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+		}
+		if len(in) == 1 {
+			return nil, false, ErrIncompressible
+		}
+		// One symbol, use RLE
+		return nil, false, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
+		return nil, false, ErrIncompressible
+	}
+	if s.Reuse == ReusePolicyMust && !canReuse {
+		// We must reuse, but we can't.
+ return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. 
+ err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count() == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+	huffNode[lowS].setParent(nodeNb)
+	huffNode[lowS-1].setParent(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].setCount(1 << 30)
+	}
+	// fake entry, strong barrier
+	huffNode0[0].setCount(1 << 31)
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+		huffNode0[n1+1].setParent(nodeNb)
+		huffNode0[n2+1].setParent(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].setNbBits(0)
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits()]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol()].nBits = v.nbBits()
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
+
+// huffSort will sort symbols, decreasing order.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 000000000..3c0b398c7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
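+// The table starts with a single header byte: values >= 128 introduce
+// (header - 127) raw 4-bit weights packed two per byte, while smaller values
+// give the byte length of an FSE-compressed weight stream.
+// The weight of the last symbol is never stored; it is implied, since the
+// weight total must be a power of two.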
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+			}
+		}
+	case 2:
+		const shift = 8 - 2
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	case 1:
+		const shift = 8 - 1
+		for br.off >= 4 {
+			br.fillFast()
+			v := dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+0] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+1] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+2] = uint8(v.entry >> 8)
+
+			v = dt[uint8(br.value>>(56+shift))]
+			br.advance(uint8(v.entry))
+			buf[off+3] = uint8(v.entry >> 8)
+
+			off += 4
+			if off == 0 {
+				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
+					br.close()
+					return nil, ErrMaxDecodedSizeExceeded
+				}
+				dst = append(dst, buf[:]...)
+			}
+		}
+	default:
+		d.bufs.Put(bufs)
+		return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
+	}
+
+	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	dst = append(dst, buf[:off]...)
+
+	// br < 4, so uint8 is fine
+	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+	shift := (8 - d.actualTableLog) & 7
+
+	for bitsLeft > 0 {
+		if br.bitsRead >= 64-8 {
+			for br.off > 0 {
+				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+				br.bitsRead -= 8
+				br.off--
+			}
+		}
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			d.bufs.Put(bufs)
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := dt[br.peekByteFast()>>shift]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= int8(nBits)
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	d.bufs.Put(bufs)
+	return dst, br.close()
+}
+
+// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog = 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
+	var br bitReaderBytes
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:0]
+
+	// Avoid bounds check by always having full sized table.
+	dt := d.dt.single[:256]
+
+	// Use temp table to avoid bound checks/append penalty.
+	bufs := d.buffer()
+	buf := &bufs[0]
+	var off uint8
+
+	const shift = 56
+
+	//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
+	for br.off >= 4 {
+		br.fillFast()
+		v := dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+0] = uint8(v.entry >> 8)
+
+		v = dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+1] = uint8(v.entry >> 8)
+
+		v = dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+2] = uint8(v.entry >> 8)
+
+		v = dt[uint8(br.value>>shift)]
+		br.advance(uint8(v.entry))
+		buf[off+3] = uint8(v.entry >> 8)
+
+		off += 4
+		if off == 0 {
+			if len(dst)+256 > maxDecodedSize {
+				d.bufs.Put(bufs)
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			dst = append(dst, buf[:]...)
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
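+			// Two independent bit readers per pass keep the table lookups
+			// pipelined instead of serialized behind a single stream.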
+			const stream = 0
+			const stream2 = 1
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		off += 4
+
+		if off == 0 {
+			if bufoff > dstEvery {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			// There must at least be 3 buffers left.
+			if len(out)-bufoff < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
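+	// Whatever no longer fills a whole 4-byte batch per stream is decoded
+	// one symbol at a time, refilling each bit reader by hand.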
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
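+			// Same structure as decompress4X8bit above, but the shift is a
+			// constant 56 because the table log is known to be exactly 8.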
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 000000000..ba7e8e6b0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc

+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
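+// The assembly lives in decompress_amd64.s and is generated by gen.go.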
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+	pbr      *bitReaderShifted
+	peekBits uint8
+	out      *byte
+	outCap   int
+	tbl      *dEntrySingle
+	decoded  int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exceeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:maxDecodedSize]
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+
+	if maxDecodedSize >= 4 {
+		ctx := decompress1xContext{
+			pbr:      &br,
+			out:      &dst[0],
+			outCap:   maxDecodedSize,
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			tbl:      &d.dt.single[0],
+		}
+
+		if cpuinfo.HasBMI2() {
+			decompress1x_main_loop_bmi2(&ctx)
+		} else {
+			decompress1x_main_loop_amd64(&ctx)
+		}
+		if ctx.decoded == error_max_decoded_size_exceeded {
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+
+		dst = dst[:ctx.decoded]
+	}
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 000000000..c4c7ab2d1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,830 @@
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
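+	// CMPQ below sets the carry flag when fewer than 4 input bytes remain,
+	// and ADCB folds that carry into DL, flagging this stream as exhausted.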
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..908c17de6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
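+	// Only two per iteration (the 8-bit paths do four): at up to
+	// tableLogMax = 11 bits per symbol, one fillFast covers two lookups.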
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 000000000..e8ad17ad0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum input size for a single uncompressed block.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the configured maximum size.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy defines how a previously built table may be reused.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust requires that reuse is possible and produces smaller output.
+	ReusePolicyMust
+)
+
+// Scratch provides reusable buffers and per-block parameters for compression and decompression.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
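+	// The header byte is 127 plus the number of weights that follow; any
+	// value >= 128 signals this direct 4-bit representation to the decoder.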
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000..e802579c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000..4465fbe9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
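Because cpuinfo is internal to this module, the toggle above is only reachable from the module's own tests. As a minimal sketch (the test name and body are illustrative, not part of the upstream package), a test can force the generic, non-BMI2 code paths like this:

```go
package cpuinfo

import "testing"

// TestGenericFallback is a hypothetical test: it forces the generic
// (non-BMI2) code paths and verifies the toggle reports that state.
func TestGenericFallback(t *testing.T) {
	restore := DisableBMI2() // pretend the CPU lacks BMI2
	defer restore()          // put the detected state back afterwards

	if HasBMI2() || HasBMI() {
		t.Fatal("expected BMI2 (and therefore BMI) to be reported as absent")
	}
	// Code that consults HasBMI2() from here on will select its
	// generic implementation instead of the BMI2 assembly.
}
```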
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 000000000..40796a49d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 000000000..77395a6b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 000000000..13c6040a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
new file mode 100644
index 000000000..05db94d39
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -0,0 +1,262 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. 
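+		// skip starts at 32, so the first 32 probes advance one byte each;
+		// once skip reaches 64, each probe advances two bytes, and so on.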
+ skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 000000000..34d01f4aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. 
The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md index 73c0c462d..8284bb081 100644 --- a/vendor/github.com/klauspost/compress/s2/README.md +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -20,11 +20,12 @@ This is important, so you don't have to worry about spending CPU cycles on alrea * Concurrent stream compression * Faster decompression, even for Snappy compatible content * Concurrent Snappy/S2 stream decompression -* Ability to quickly skip forward in compressed stream +* Skip forward in compressed stream * Random seeking with indexes * Compatible with reading Snappy compressed content * Smaller block size overhead on incompressible blocks * Block concatenation +* Block Dictionary support * Uncompressed stream mode * Automatic stream size padding * Snappy compatible block compression @@ -325,35 +326,35 @@ The content compressed in this mode is fully compatible with the standard decode Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): -| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | -|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| -| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% | -| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - | -| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% | -| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - | -| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% | -| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - | -| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% | -| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - | -| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% | -| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - | -| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% | -| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - | -| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% | -| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - | -| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% | -| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - | -| 
[enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% | -| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - | -| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% | -| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - | -| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% | -| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - | +| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | +|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| +| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% | +| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - | +| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% | +| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - | +| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% | +| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - | +| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% | +| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - | +| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% | +| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - | +| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% | +| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - | +| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% | +| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - | +| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% | +| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - | +| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% | +| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - | +| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% | +| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - | +| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% | +| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - | ### Legend -* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. -* `S2 throughput`: Throughput of S2 in MB/s. +* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. +* `S2 Throughput`: Throughput of S2 in MB/s. * `S2 % smaller`: How many percent of the Snappy output size is S2 better. * `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy. * `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy. 
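The "better" rows above correspond to an opt-in writer mode. As a rough sketch of how these modes are selected when compressing a stream (`WriterBetterCompression` and `WriterConcurrency` are options of this package; the concurrency value of 16 simply mirrors the benchmark machine):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Stream-compress stdin to stdout in S2 "better" mode,
	// using up to 16 concurrent block encoders.
	enc := s2.NewWriter(os.Stdout,
		s2.WriterBetterCompression(),
		s2.WriterConcurrency(16),
	)
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		log.Fatal(err)
	}
	// Close flushes pending blocks and must be checked.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```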
@@ -361,7 +362,7 @@ Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all th
 
 There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
 
-Machine generated data gets by far the biggest compression boost, with size being being reduced by up to 45% of Snappy size.
+Machine-generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
 
 The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
 
@@ -404,15 +405,15 @@ The "better" compression mode will actively look for shorter matches, which is w
 
 Without assembly, decompression is also very fast; single goroutine decompression speed. No assembly:
 
 | File | Speedup | S2 Throughput |
-|--------------------------------|--------------|---------------|
-| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
-| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
-| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
-| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
-| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
-| enwik9.s2 | 1.67x | 681.53 MB/s |
-| adresser.json.s2 | 3.41x | 4230.53 MB/s |
-| silesia.tar.s2 | 1.52x | 811.58 |
+|--------------------------------|---------------|---------------|
+| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
+| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
+| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
+| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
+| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
+| enwik9.s2 | 1.67x | 681.53 MB/s |
+| adresser.json.s2 | 3.41x | 4230.53 MB/s |
+| silesia.tar.s2 | 1.52x | 811.58 MB/s |
 
 Even though S2 typically compresses better than Snappy, decompression speed is always better.
 
@@ -450,14 +451,14 @@ The most reliable is a wide dataset.
 For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
 53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
 
-| * | Input | Output | Reduction | MB/s |
-|-------------------|------------|------------|-----------|--------|
-| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** |
-| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 |
-| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 |
-| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 |
-| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 |
-| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 |
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
+| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
+| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
+| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
 
 S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
 "Better" mode provides the same compression speed as LZ4 with better compression ratio.
 
@@ -489,42 +490,23 @@ AMD64 assembly is used for both S2 and Snappy.
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec | |-----------------------|-------------|---------|--------------|-------------|-------------|-------------| -| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s | -| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s | -| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s | -| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s | -| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s | -| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s | -| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s | -| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s | -| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s | -| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s | -| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s | -| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s | -| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s | -| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s | -| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s | -| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s | - - -| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed | -|-----------------------|-------------|------------------|----------|--------------| -| html | 22.31% | 7.58% | 1.07x | 1.20x | -| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x | -| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x | -| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x | -| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x | -| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x | -| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x | -| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x | -| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x | -| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x | -| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x | -| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x | -| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x | -| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x | -| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x | -| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x | +| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s | +| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s | +| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s | +| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s | +| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s | +| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s | +| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s | +| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s | +| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s | +| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s | +| kppkn.gtb | 
69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
+
 Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
 
@@ -543,42 +525,23 @@ So individual benchmarks should only be seen as a guideline and the overall pict
 
 | Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
 |-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
-| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s |
-| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s |
-| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s |
-| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s |
-| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s |
-| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s |
-| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s |
-| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s |
-| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s |
-| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s |
-| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s |
-| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s |
-| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s |
-| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s |
-| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s |
-| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s |
-
-
-| Relative Perf | Snappy size | Better size | Better Speed | Better dec |
-|-----------------------|-------------|-------------|--------------|------------|
-| html | 22.31% | 13.18% | 0.48x | 0.98x |
-| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x |
-| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x |
-| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x |
-| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x |
-| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x |
-| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x |
-| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x |
-| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x |
-| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x |
-| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x |
-| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x |
-| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x |
-| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x |
-| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x |
-| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x |
+| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
+| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
+| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
+| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
+| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
+| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
+| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
+| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
+| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
+| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
+| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
+| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
+| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
+| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
+
 Except for the mostly incompressible JPEG image, compression is better and usually in the double digits in terms of percentage reduction over Snappy.
 
@@ -605,33 +568,150 @@ Some examples compared on 16 core CPU, amd64 assembly used:
 
 ```
 * enwik10
-Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
-Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
-Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
+Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
+Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
+Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
 
 * github-june-2days-2019.json
-Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
-Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
-Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
+Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
 
 * nyc-taxi-data-10M.csv
-Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
-Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
-Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
+Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
 
 * 10gb.tar
-Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
-Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
-Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/
+Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
+Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
 
 * consensus.db.10gb
-Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
-Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
-Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
+Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
```
 
 Decompression speed should be around the same as using the 'better' compression mode.
 
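+The three modes above map directly onto the block API. As an illustrative sketch (not upstream documentation; the payload literal is a placeholder):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/compress/s2"
+)
+
+func main() {
+	payload := []byte("an example payload, an example payload, an example payload")
+
+	// Fastest, default compression.
+	def := s2.Encode(nil, payload)
+	// Slower, but smaller output.
+	better := s2.EncodeBetter(nil, payload)
+	// Slowest, smallest output.
+	best := s2.EncodeBest(nil, payload)
+	fmt.Println(len(def), len(better), len(best))
+
+	// All three modes decode with the same function.
+	decoded, err := s2.Decode(nil, best)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(decoded))
+}
+```
+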
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+either encoding or decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allows providing a custom dictionary that will serve as lookup in the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
+the same usage scenario applies to S2 dictionaries.
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits the dictionary to only be enabled on the first 64KB of a block.
+This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
+
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard the following sizes can be achieved.
+
+|                    | Default          | Better           | Best             |
+|--------------------|------------------|------------------|------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary    | 921524 (12.31%)  | 873154 (11.67%)  | 785503 (10.49%)  |
+
+So for highly repetitive content, this case provides an almost 3x reduction in size.
+
+For less uniform data we will use the Go source code tree.
+Compressing the first 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
+
+|                    | Default           | Better            | Best              |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary    | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file        | 362 bytes         | 428 bytes         | 472 bytes         |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example in the `github_users_sample_set` above, the average compression only goes up from
+10.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary.
+
+```Go
+	// Read a sample
+	sample, err := os.ReadFile("sample.json")
+
+	// Create a dictionary.
+	dict := s2.MakeDict(sample, nil)
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// To encode:
+	encoded := dict.Encode(nil, file)
+
+	// To decode:
+	decoded, err := dict.Decode(nil, encoded)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful for generating dictionaries for files that don't have a fixed structure.
+
+
+Example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a 64KB dictionary, which can be converted to an S2 dictionary like this:
+
+```Go
+	// Decode the Zstandard dictionary.
+	insp, err := zstd.InspectDictionary(zdict)
+	if err != nil {
+		panic(err)
+	}
+
+	// We are only interested in the contents.
+	// Assume that files start with "// Copyright (c) 2023".
+	// Search for the longest match for that.
+	// This may save a few bytes.
+	dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// We can now encode using this dictionary
+	encodedWithDict := dict.Encode(nil, payload)
+
+	// To decode content:
+	decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
+This can be omitted, at the expense of a few bytes. A minimal save/load sketch follows below.
+
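+As an illustrative round trip (not upstream documentation; the file name and `payload` are placeholders):
+
+```Go
+package main
+
+import (
+	"os"
+
+	"github.com/klauspost/compress/s2"
+)
+
+func main() {
+	payload := []byte("example content, example content, example content")
+
+	// Build a dictionary; a trained dictionary would normally be used instead.
+	dict := s2.MakeDict(payload, nil)
+
+	// Persist only the serialized S2 dictionary.
+	if err := os.WriteFile("s2.dict", dict.Bytes(), 0o644); err != nil {
+		panic(err)
+	}
+
+	// Reload it later; NewDict returns nil if the dictionary is invalid.
+	raw, err := os.ReadFile("s2.dict")
+	if err != nil {
+		panic(err)
+	}
+	d := s2.NewDict(raw)
+	if d == nil {
+		panic("invalid dictionary")
+	}
+
+	// The same dictionary must be used for both encoding and decoding.
+	encoded := d.Encode(nil, payload)
+	if _, err := d.Decode(nil, encoded); err != nil {
+		panic(err)
+	}
+}
+```
+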
 # Snappy Compatibility
 
 S2 now offers full compatibility with Snappy.
 
@@ -648,10 +728,10 @@ If you would like more control, you can use the s2 package as described below:
 
 Snappy compatible blocks can be generated with the S2 encoder.
 Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, for lower memory usage. Replace
 
-| Snappy | S2 replacement |
-|----------------------------|-------------------------|
-| snappy.Encode(...) | s2.EncodeSnappy(...) |
-| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+| Snappy                    | S2 replacement        |
+|---------------------------|-----------------------|
+| snappy.Encode(...)        | s2.EncodeSnappy(...)  |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
 
 `s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
 
@@ -660,12 +740,12 @@ Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
 53927 files, total input size: 4,014,735,833 bytes.
 amd64, single goroutine used:
 
-| Encoder | Size | MB/s | Reduction |
-|-----------------------|------------|------------|------------
-| snappy.Encode | 1128706759 | 725.59 | 71.89% |
-| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
-| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
-| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%**|
+| Encoder               | Size       | MB/s       | Reduction  |
+|-----------------------|------------|------------|------------|
+| snappy.Encode         | 1128706759 | 725.59     | 71.89%     |
+| s2.EncodeSnappy       | 1093823291 | **899.16** | 72.75%     |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49     | 75.06%     |
+| s2.EncodeSnappyBest   | 944507998  | 66.00      | **76.47%** |
 
 ## Streams
 
@@ -835,6 +915,13 @@ This is done using the regular "Skip" function:
 
 This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
 
+# Compact storage
+
+For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of validation, but is the most compact representation. `RemoveIndexHeaders` returns nil if the headers contain errors.
+
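+As a hedged sketch of that round trip (assuming `idx` already holds a serialized index; the helper name is illustrative):
+
+```Go
+package main
+
+import "github.com/klauspost/compress/s2"
+
+// compactRoundTrip strips the index headers for storage and restores them
+// before loading. `idx` is assumed to be a valid serialized index.
+func compactRoundTrip(idx []byte) (s2.Index, error) {
+	compact := s2.RemoveIndexHeaders(idx) // nil means the headers contained errors
+	if compact == nil {
+		return s2.Index{}, s2.ErrCorrupt
+	}
+
+	// `compact` is the form you would persist.
+
+	// Headers must be restored before the index can be loaded again.
+	var index s2.Index
+	_, err := index.Load(s2.RestoreIndexHeaders(compact))
+	return index, err
+}
+```
+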
 ## Index Format:
 
 Each block is structured as a snappy skippable block, with the chunk ID 0x99.
 
@@ -844,20 +931,20 @@ The block can be read from the front, but contains information so it can be read
 Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
 with un-encoded value length of 64 bits, unless other limits are specified.
 
-| Content | Format |
-|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|
-| ID, `[1]byte` | Always 0x99. |
-| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
-| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
-| UncompressedSize, Varint | Total Uncompressed size. |
-| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
-| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
-| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
-| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
-| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
-| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
-| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
-| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+| Content                              | Format                                                                                                                          |
+|--------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte`                        | Always 0x99.                                                                                                                    |
+| Data Length, `[3]byte`               | 3 byte little-endian length of the chunk in bytes, following this.                                                              |
+| Header `[6]byte`                     | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00".                                                          |
+| UncompressedSize, Varint             | Total Uncompressed size.                                                                                                        |
+| CompressedSize, Varint               | Total Compressed size if known. Should be -1 if unknown.                                                                        |
+| EstBlockSize, Varint                 | Block Size, used for guessing uncompressed offsets. Must be >= 0.                                                               |
+| Entries, Varint                      | Number of Entries in index, must be < 65536 and >= 0.                                                                           |
+| HasUncompressedOffsets `byte`        | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid.                                               |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode.                                                                                  |
+| CompressedOffsets, [Entries]VarInt   | Compressed offsets. See below how to decode.                                                                                    |
+| Block Size, `[4]byte`                | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block.        |
+| Trailer `[6]byte`                    | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream.  |
 
 For regular streams the uncompressed offsets are fully predictable,
 so `HasUncompressedOffsets` allows specifying that compressed blocks all have
 
@@ -929,6 +1016,7 @@ To decode from any given uncompressed offset `(wantOffset)`:
 
 See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
 
+
 # Format Extensions
 
 * Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
 
@@ -951,13 +1039,80 @@ The length is specified by reading the 3-bit length specified in the tag and dec
 | 7 | 65540 + read 3 bytes |
 
 This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies.
 
 Lengths are stored as little endian values.
 
-The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams.
+The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams.
 
 Default streaming block size is 1MB.
 
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as lookup in the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that, the dictionary contains values that can be used as back-references.
+
+Often-used data should be placed at the *end* of the dictionary, since offsets < 2048 bytes will be smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifies the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
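+To make the layout concrete, here is a minimal sketch that assembles this raw form with the standard library and hands it to `s2.NewDict` (the content and repeat value are made-up examples):
+
+```Go
+package main
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/klauspost/compress/s2"
+)
+
+func main() {
+	// At least 16 bytes and at most 64KiB of dictionary content.
+	content := bytes.Repeat([]byte("0123456789abcdef"), 4)
+
+	// Initial repeat offset into the content; must be less than len(content)-8.
+	repeat := uint64(10)
+
+	// Layout: [repeat value (uvarint)][dictionary content...]
+	raw := binary.AppendUvarint(nil, repeat)
+	raw = append(raw, content...)
+
+	// NewDict parses this layout and returns nil if it is invalid.
+	if d := s2.NewDict(raw); d == nil {
+		panic("invalid dictionary")
+	}
+}
+```
+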
+## Encoding
+
+From the decoder point of view, the dictionary content is seen as preceding the encoded content.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+When 64KB (65536 bytes) has been en/decoded, it is no longer allowed to reference the dictionary,
+either by copy or by repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks whether the supplied dictionary is the correct one for a block.
+Because of this, there is no overhead when using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+The initial repeat offset is set at 10, which is the letter `2`.
+
+Encoded: `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams, each block can use the dictionary.
+
+The dictionary cannot currently be provided on the stream.
+
+
 # LICENSE
 
 This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
index 27c0f3c2c..b7c9adfdd 100644
--- a/vendor/github.com/klauspost/compress/s2/decode.go
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -13,6 +13,7 @@ import (
 	"io/ioutil"
 	"math"
 	"runtime"
+	"strconv"
 	"sync"
 )
 
@@ -880,15 +881,20 @@ func (r *Reader) Skip(n int64) error {
 // See Reader.ReadSeeker
 type ReadSeeker struct {
 	*Reader
+	readAtMu sync.Mutex
 }
 
-// ReadSeeker will return an io.ReadSeeker compatible version of the reader.
+// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
+// compatible version of the reader.
 // If 'random' is specified the returned io.Seeker can be used for
 // random seeking, otherwise only forward seeking is supported.
 // Enabling random seeking requires the original input to support
 // the io.Seeker interface.
 // A custom index can be specified which will be used if supplied.
 // When using a custom index, it will not be read from the input stream.
+// The ReadAt position will affect regular reads and the current position of Seek.
+// So using Read after ReadAt will continue from where the ReadAt stopped.
+// No functions should be used concurrently.
 // The returned ReadSeeker contains a shallow reference to the existing Reader,
 // meaning changes performed to one is reflected in the other.
 func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
@@ -952,44 +958,61 @@ func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
 // Seek allows seeking in compressed data.
 func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
 	if r.err != nil {
+		if !errors.Is(r.err, io.EOF) {
+			return 0, r.err
+		}
+		// Reset on EOF
+		r.err = nil
+	}
+
+	// Calculate absolute offset.
+ absOffset := offset + + switch whence { + case io.SeekStart: + case io.SeekCurrent: + absOffset = r.blockStart + int64(r.i) + offset + case io.SeekEnd: + if r.index == nil { + return 0, ErrUnsupported + } + absOffset = r.index.TotalUncompressed + offset + default: + r.err = ErrUnsupported return 0, r.err } - if offset == 0 && whence == io.SeekCurrent { - return r.blockStart + int64(r.i), nil + + if absOffset < 0 { + return 0, errors.New("seek before start of file") } + if !r.readHeader { // Make sure we read the header. _, r.err = r.Read([]byte{}) + if r.err != nil { + return 0, r.err + } } + + // If we are inside current block no need to seek. + // This includes no offset changes. + if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) { + r.i = int(absOffset - r.blockStart) + return r.blockStart + int64(r.i), nil + } + rs, ok := r.r.(io.ReadSeeker) if r.index == nil || !ok { - if whence == io.SeekCurrent && offset >= 0 { - err := r.Skip(offset) - return r.blockStart + int64(r.i), err - } - if whence == io.SeekStart && offset >= r.blockStart+int64(r.i) { - err := r.Skip(offset - r.blockStart - int64(r.i)) + currOffset := r.blockStart + int64(r.i) + if absOffset >= currOffset { + err := r.Skip(absOffset - currOffset) return r.blockStart + int64(r.i), err } return 0, ErrUnsupported - - } - - switch whence { - case io.SeekCurrent: - offset += r.blockStart + int64(r.i) - case io.SeekEnd: - if offset > 0 { - return 0, errors.New("seek after end of file") - } - offset = r.index.TotalUncompressed + offset - } - - if offset < 0 { - return 0, errors.New("seek before start of file") } - c, u, err := r.index.Find(offset) + // We can seek and we have an index. + c, u, err := r.index.Find(absOffset) if err != nil { return r.blockStart + int64(r.i), err } @@ -1000,12 +1023,57 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { return 0, err } - r.i = r.j // Remove rest of current block. - if u < offset { + r.i = r.j // Remove rest of current block. + r.blockStart = u - int64(r.j) // Adjust current block start for accounting. + if u < absOffset { // Forward inside block - return offset, r.Skip(offset - u) + return absOffset, r.Skip(absOffset - u) } - return offset, nil + if u > absOffset { + return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset) + } + return absOffset, nil +} + +// ReadAt reads len(p) bytes into p starting at offset off in the +// underlying input source. It returns the number of bytes +// read (0 <= n <= len(p)) and any error encountered. +// +// When ReadAt returns n < len(p), it returns a non-nil error +// explaining why more bytes were not returned. In this respect, +// ReadAt is stricter than Read. +// +// Even if ReadAt returns n < len(p), it may use all of p as scratch +// space during the call. If some data is available but not len(p) bytes, +// ReadAt blocks until either all the data is available or an error occurs. +// In this respect ReadAt is different from Read. +// +// If the n = len(p) bytes returned by ReadAt are at the end of the +// input source, ReadAt may return either err == EOF or err == nil. +// +// If ReadAt is reading from an input source with a seek offset, +// ReadAt should not affect nor be affected by the underlying +// seek offset. +// +// Clients of ReadAt can execute parallel ReadAt calls on the +// same input source. This is however not recommended. 
+func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) { + r.readAtMu.Lock() + defer r.readAtMu.Unlock() + _, err := r.Seek(offset, io.SeekStart) + if err != nil { + return 0, err + } + n := 0 + for n < len(p) { + n2, err := r.Read(p[n:]) + if err != nil { + // This will include io.EOF + return n + n2, err + } + n += n2 + } + return n, nil } // ReadByte satisfies the io.ByteReader interface. @@ -1044,3 +1112,370 @@ func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { r.skippableCB[id] = fn return nil } + +// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2DecodeDict(dst, src []byte, dict *Dict) int { + if dict == nil { + return s2Decode(dst, src) + } + const debug = false + const debugErrs = debug + + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := len(dict.dict) - dict.repeat + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. + x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + startOff := len(dict.dict) - offset + d + if startOff < 
0 || startOff+length > len(dict.dict) { + if debugErrs { + fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict)) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff) + } + copy(dst[d:d+length], dict.dict[startOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + rOff := len(dict.dict) - (offset - d) + if debug { + fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff) + } + if rOff+length > len(dict.dict) { + if debugErrs { + fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + if rOff < 0 { + if debugErrs { + fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + copy(dst[d:d+length], dict.dict[rOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. 
+ a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + if debugErrs { + fmt.Println("wanted length", len(dst), "got", d) + } + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go index 1074ebd21..2cb55c2c7 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_other.go +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -28,6 +28,9 @@ func s2Decode(dst, src []byte) int { // As long as we can read at least 5 bytes... for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 switch src[s] & 0x03 { case tagLiteral: x := uint32(src[s] >> 2) @@ -38,17 +41,25 @@ func s2Decode(dst, src []byte) int { s += 2 x = uint32(src[s-1]) case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 s += 3 - x = uint32(src[s-2]) | uint32(src[s-1])<<8 case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. + x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 s += 4 - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 s += 5 - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 } length = int(x) + 1 if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } return decodeErrCodeCorrupt } if debug { @@ -62,8 +73,8 @@ func s2Decode(dst, src []byte) int { case tagCopy1: s += 2 - length = int(src[s-2]) >> 2 & 0x7 toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 if toffset == 0 { if debug { fmt.Print("(repeat) ") @@ -71,14 +82,16 @@ func s2Decode(dst, src []byte) int { // keep last offset switch length { case 5: + length = int(src[s]) + 4 s += 1 - length = int(uint32(src[s-1])) + 4 case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) s += 2 - length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) s += 3 - length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) default: // 0-> 4 } } else { @@ -86,17 +99,23 @@ func s2Decode(dst, src []byte) int { } length += 4 case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 s += 3 - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 s += 5 - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) } if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + return decodeErrCodeCorrupt } @@ -163,6 +182,9 @@ func s2Decode(dst, src []byte) int { } length = int(x) + 1 if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } 
return decodeErrCodeCorrupt } if debug { @@ -229,6 +251,9 @@ func s2Decode(dst, src []byte) int { } if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } return decodeErrCodeCorrupt } diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go new file mode 100644 index 000000000..24f7ce80b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/dict.go @@ -0,0 +1,331 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "sync" +) + +const ( + // MinDictSize is the minimum dictionary size when repeat has been read. + MinDictSize = 16 + + // MaxDictSize is the maximum dictionary size when repeat has been read. + MaxDictSize = 65536 + + // MaxDictSrcOffset is the maximum offset where a dictionary entry can start. + MaxDictSrcOffset = 65535 +) + +// Dict contains a dictionary that can be used for encoding and decoding s2 +type Dict struct { + dict []byte + repeat int // Repeat as index of dict + + fast, better, best sync.Once + fastTable *[1 << 14]uint16 + + betterTableShort *[1 << 14]uint16 + betterTableLong *[1 << 17]uint16 + + bestTableShort *[1 << 16]uint32 + bestTableLong *[1 << 19]uint32 +} + +// NewDict will read a dictionary. +// It will return nil if the dictionary is invalid. +func NewDict(dict []byte) *Dict { + if len(dict) == 0 { + return nil + } + var d Dict + // Repeat is the first value of the dict + r, n := binary.Uvarint(dict) + if n <= 0 { + return nil + } + dict = dict[n:] + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize || len(dict) > MaxDictSize { + return nil + } + d.repeat = int(r) + if d.repeat > len(dict) { + return nil + } + return &d +} + +// Bytes will return a serialized version of the dictionary. +// The output can be sent to NewDict. +func (d *Dict) Bytes() []byte { + dst := make([]byte, binary.MaxVarintLen16+len(d.dict)) + return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...) +} + +// MakeDict will create a dictionary. +// 'data' must be at least MinDictSize. +// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used. +// If searchStart is set the start repeat value will be set to the last +// match of this content. +// If no matches are found, it will attempt to find shorter matches. +// This content should match the typical start of a block. +// If at least 4 bytes cannot be matched, repeat is set to start of block. +func MakeDict(data []byte, searchStart []byte) *Dict { + if len(data) == 0 { + return nil + } + if len(data) > MaxDictSize { + data = data[len(data)-MaxDictSize:] + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize { + return nil + } + + // Find the longest match possible, last entry if multiple. + for s := len(searchStart); s > 4; s-- { + if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 { + d.repeat = idx + break + } + } + + return &d +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. 
+// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockDictGo(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBetterDict(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
+ dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBest(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func (d *Dict) Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2DecodeDict(dst, src[s:], d) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +func (d *Dict) initFast() { + d.fast.Do(func() { + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8-2; i += 3 { + x0 := load64(d.dict, i) + h0 := hash6(x0, tableBits) + h1 := hash6(x0>>8, tableBits) + h2 := hash6(x0>>16, tableBits) + table[h0] = uint16(i) + table[h1] = uint16(i + 1) + table[h2] = uint16(i + 2) + } + d.fastTable = &table + }) +} + +func (d *Dict) initBetter() { + d.better.Do(func() { + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + lTable[hash7(cv, lTableBits)] = uint16(i) + sTable[hash4(cv, sTableBits)] = uint16(i) + } + d.betterTableShort = &sTable + d.betterTableLong = &lTable + }) +} + +func (d *Dict) initBest() { + d.best.Do(func() { + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + lTable[hashL] = uint32(i) | candidateL<<16 + sTable[hashS] = uint32(i) | candidateS<<16 + } + d.bestTableShort = &sTable + d.bestTableLong = &lTable + }) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go index 1aefabf31..c2ca7236a 100644 --- a/vendor/github.com/klauspost/compress/s2/encode.go +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -58,6 +58,32 @@ func Encode(dst, src []byte) []byte { return dst[:d] } +// EstimateBlockSize will perform a very fast compression +// without outputting the result and return the compressed output size. +// The function returns -1 if no improvement could be achieved. +// Using actual compression will most often produce better compression than the estimate. 
+func EstimateBlockSize(src []byte) (d int) {
+	if len(src) < 6 || int64(len(src)) > 0xffffffff {
+		return -1
+	}
+	if len(src) <= 1024 {
+		d = calcBlockSizeSmall(src)
+	} else {
+		d = calcBlockSize(src)
+	}
+
+	if d == 0 {
+		return -1
+	}
+	// Add the size of the varint-encoded block size.
+	d += (bits.Len64(uint64(len(src))) + 7) / 7
+
+	if d >= len(src) {
+		return -1
+	}
+	return d
+}
+
 // EncodeBetter returns the encoded form of src. The returned slice may be a sub-
 // slice of dst if dst was large enough to hold the entire encoded block.
 // Otherwise, a newly allocated slice will be returned.
@@ -132,7 +158,7 @@ func EncodeBest(dst, src []byte) []byte {
 		d += emitLiteral(dst[d:], src)
 		return dst[:d]
 	}
-	n := encodeBlockBest(dst[d:], src)
+	n := encodeBlockBest(dst[d:], src, nil)
 	if n > 0 {
 		d += n
 		return dst[:d]
@@ -404,10 +430,11 @@ type Writer struct {
 	buffers sync.Pool
 	pad     int
 
-	writer   io.Writer
-	randSrc  io.Reader
-	writerWg sync.WaitGroup
-	index    Index
+	writer    io.Writer
+	randSrc   io.Reader
+	writerWg  sync.WaitGroup
+	index     Index
+	customEnc func(dst, src []byte) int
 
 	// wroteStreamHeader is whether we have written the stream header.
 	wroteStreamHeader bool
@@ -773,6 +800,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
 }
 
 func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
+	if w.customEnc != nil {
+		return w.customEnc(obuf, uncompressed)
+	}
 	if w.snappy {
 		switch w.level {
 		case levelFast:
@@ -790,7 +820,7 @@ func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
 	case levelBetter:
 		return encodeBlockBetter(obuf, uncompressed)
 	case levelBest:
-		return encodeBlockBest(obuf, uncompressed)
+		return encodeBlockBest(obuf, uncompressed, nil)
 	}
 	return 0
 }
@@ -1339,3 +1369,15 @@ func WriterFlushOnWrite() WriterOption {
 		return nil
 	}
 }
+
+// WriterCustomEncoder allows overriding the encoder used for blocks on the stream.
+// The function must compress 'src' into 'dst' and return the number of bytes used in dst.
+// The block size header (initial varint) must not be added by the encoder.
+// Returning 0 indicates the block could not be compressed.
+// The function should expect to be called concurrently.
+func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
+	return func(w *Writer) error {
+		w.customEnc = fn
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
index 8b16c38a6..11657f094 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_all.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -8,6 +8,7 @@ package s2
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"math/bits"
 )
@@ -58,8 +59,9 @@ func encodeGo(dst, src []byte) []byte {
 // been written.
 //
 // It also assumes that:
+//
 //	len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlockGo(dst, src []byte) (d int) {
 	// Initialize the hash table.
 	const (
@@ -454,3 +456,594 @@ emitRemainder:
 	}
 	return d
 }
+
+// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
+	// Initialize the hash table.
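+	// (table holds positions of recent hash6 matches within src itself;
+	// dictionary candidates are looked up in dict.fastTable, built lazily
+	// by the initFast call below.)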
+ const ( + tableBits = 14 + maxTableSize = 1 << tableBits + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + dict.initFast() + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form can start with a dict entry (copy or repeat). + s := 0 + + // Convert dict repeat to offset + repeat := len(dict.dict) - dict.repeat + cv := load64(src, 0) + + // While in dict +searchDict: + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + if nextS > sLimit { + if debug { + fmt.Println("slimit reached", s, nextS) + } + break searchDict + } + candidateDict := int(dict.fastTable[hash0]) + candidateDict2 := int(dict.fastTable[hash1]) + candidate2 := int(table[hash1]) + candidate := int(table[hash0]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + + if repeat > s { + candidate := len(dict.dict) - repeat + s + if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + cv = load64(src, s) + continue + } + } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. 
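+				// (With nextEmit == 0 nothing has been emitted yet, so there is
+				// no previous offset for a repeat code to refer back to.)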
+ d += emitCopy(dst[d:], repeat, s-base) + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + if debug { + fmt.Println("emitted reg repeat", s-base, "s:", s) + } + cv = load64(src, s) + continue searchDict + } + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue searchDict + } + // Start with table. These matches will always be closer. + if uint32(cv) == load32(src, candidate) { + goto emitMatch + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + goto emitMatch + } + + // Check dict. Dicts have longer offsets, so we want longer matches. + if cv == load64(dict.dict, candidateDict) { + table[hash2] = uint32(s + 2) + goto emitDict + } + + candidateDict = int(dict.fastTable[hash2]) + // Check if upper 7 bytes match + if candidateDict2 >= 1 { + if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) { + table[hash2] = uint32(s + 2) + candidateDict = candidateDict2 + s++ + goto emitDict + } + } + + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + goto emitMatch + } + if candidateDict >= 2 { + // Check if upper 6 bytes match + if cv^load64(dict.dict, candidateDict-2) < (1 << 16) { + s += 2 + goto emitDict + } + } + + cv = load64(src, nextS) + s = nextS + continue searchDict + + emitDict: + { + if debug { + if load32(dict.dict, candidateDict) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] { + candidateDict-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = s + (len(dict.dict)) - candidateDict + + // Extend the 4-byte match as long as possible. + s += 4 + candidateDict += 4 + for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateDict += 8 + } + + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], repeat, s-base) + } else { + // Split to ensure we don't start a copy within next block + d += emitCopy(dst[d:], repeat, 4) + d += emitRepeat(dst[d:], repeat, s-base-4) + } + if false { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index and continue loop to try new candidate. 
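+			// (Positions s-2 and s-1 are hashed below, so the next iteration
+			// can find matches ending just behind this copy.)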
+ x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>8, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s - 1) + cv = load64(src, s) + } + continue + } + emitMatch: + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + if debug { + fmt.Println("non-dict matching at", s, "repeat:", repeat) + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. 
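+			// (checkRep == 1 allows the byte at s to mismatch: the 4-byte probe
+			// compares the bytes from s+checkRep against the data one
+			// repeat-offset back.)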
+ const checkRep = 1 + if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + if debug { + fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
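+				// (Returning 0 marks the block as incompressible; the caller,
+				// here Dict.Encode, falls back to emitting a single literal.)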
+ return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index e612225f4..ebc332ad5 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -3,13 +3,16 @@ package s2 +const hasAmd64Asm = true + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -43,8 +46,9 @@ func encodeBlock(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetter(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -78,8 +82,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockSnappy(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... @@ -112,8 +117,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappy(dst, src []byte) (d int) { const ( // Use 12 bit table when less than... diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go index 4bc80bc6a..1d13e869a 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_best.go +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -7,6 +7,7 @@ package s2 import ( "fmt" + "math" "math/bits" ) @@ -15,9 +16,10 @@ import ( // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBest(dst, src []byte) (d int) { +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { // Initialize the hash tables. const ( // Long hash matches. 
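+		// (Assumed from Dict.initBest: 19-bit long and 16-bit short tables, so
+		// the same hash values can index both the block and the dict tables.)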
@@ -29,6 +31,8 @@ func encodeBlockBest(dst, src []byte) (d int) { maxSTableSize = 1 << sTableBits inputMargin = 8 + 2 + + debug = false ) // sLimit is when to stop looking for offset/length copies. The inputMargin @@ -38,6 +42,10 @@ func encodeBlockBest(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { return 0 } + sLimitDict := len(src) - inputMargin + if sLimitDict > MaxDictSrcOffset-inputMargin { + sLimitDict = MaxDictSrcOffset - inputMargin + } var lTable [maxLTableSize]uint64 var sTable [maxSTableSize]uint64 @@ -51,10 +59,15 @@ func encodeBlockBest(dst, src []byte) (d int) { // The encoded form must start with a literal, as there are no previous // bytes to copy, so we start looking for hash matches at s == 1. s := 1 + repeat := 1 + if dict != nil { + dict.initBest() + s = 0 + repeat = len(dict.dict) - dict.repeat + } cv := load64(src, s) // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - repeat := 1 const lowbitMask = 0xffffffff getCur := func(x uint64) int { return int(x & lowbitMask) @@ -66,11 +79,11 @@ func encodeBlockBest(dst, src []byte) (d int) { for { type match struct { - offset int - s int - length int - score int - rep bool + offset int + s int + length int + score int + rep, dict bool } var best match for { @@ -84,6 +97,12 @@ func encodeBlockBest(dst, src []byte) (d int) { if nextS > sLimit { goto emitRemainder } + if dict != nil && s >= MaxDictSrcOffset { + dict = nil + if repeat > s { + repeat = math.MinInt32 + } + } hashL := hash8(cv, lTableBits) hashS := hash4(cv, sTableBits) candidateL := lTable[hashL] @@ -113,7 +132,15 @@ func encodeBlockBest(dst, src []byte) (d int) { } m := match{offset: offset, s: s, length: 4 + offset, rep: rep} s += 4 - for s <= sLimit { + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[m.length] { + m.length++ + s++ + continue + } + break + } if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { m.length += bits.TrailingZeros64(diff) >> 3 break @@ -129,6 +156,62 @@ func encodeBlockBest(dst, src []byte) (d int) { } return m } + matchDict := func(candidate, s int, first uint32, rep bool) match { + // Calculate offset as if in continuous array with s + offset := -len(dict.dict) + candidate + if best.length != 0 && best.s-best.offset == s-offset && !rep { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + + if load32(dict.dict, candidate) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true} + s += 4 + if !rep { + for s < sLimitDict && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } else { + for s < len(src) && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } + m.length -= candidate + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
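+			// (bestOf below treats length == 0 as "no match", so zeroing the
+			// length discards this candidate.)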
+ m.length = 0 + } + return m + } bestOf := func(a, b match) match { if b.length == 0 { @@ -145,45 +228,99 @@ func encodeBlockBest(dst, src []byte) (d int) { return b } - best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) - best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) - best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) - + if s > 0 { + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + } + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false)) + } { - best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + if (dict == nil || repeat <= s) && repeat > 0 { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + } else if s-repeat < -4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + candidate++ + best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true)) + } + if best.length > 0 { + hashS := hash4(cv>>8, sTableBits) // s+1 - nextShort := sTable[hash4(cv>>8, sTableBits)] + nextShort := sTable[hashS] s := s + 1 cv := load64(src, s) - nextLong := lTable[hash8(cv, lTableBits)] + hashL := hash8(cv, lTableBits) + nextLong := lTable[hashL] best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) - // Repeat at + 2 - best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + + // Dict at + 1 + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } // s+2 if true { - nextShort = sTable[hash4(cv>>8, sTableBits)] + hashS := hash4(cv>>8, sTableBits) + + nextShort = sTable[hashS] s++ cv = load64(src, s) - nextLong = lTable[hash8(cv, lTableBits)] + hashL := hash8(cv, lTableBits) + nextLong = lTable[hashL] + + if (dict == nil || repeat <= s) && repeat > 0 { + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true)) + } else if repeat-s > 4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + } best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at +2 + // Very small gain + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) 
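+						// (Only the newest dict entry, kept in the low 16 bits of
+						// the packed value, is probed at +2; initBest stores an
+						// older candidate in the high half.)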
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } } // Search for a match at best match end, see if that is better. - if sAt := best.s + best.length; sAt < sLimit { - sBack := best.s - backL := best.length + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning // Load initial values cv = load64(src, sBack) - // Search for mismatch + + // Grab candidates... next := lTable[hash8(load64(src, sAt), lTableBits)] - //next := sTable[hash4(load64(src, sAt), sTableBits)] if checkAt := getCur(next) - backL; checkAt > 0 { best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) @@ -191,6 +328,16 @@ func encodeBlockBest(dst, src []byte) (d int) { if checkAt := getPrev(next) - backL; checkAt > 0 { best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } } } } @@ -209,7 +356,7 @@ func encodeBlockBest(dst, src []byte) (d int) { // Extend backwards, not needed for repeats... s = best.s - if !best.rep { + if !best.rep && !best.dict { for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { best.offset-- best.length++ @@ -226,7 +373,6 @@ func encodeBlockBest(dst, src []byte) (d int) { base := s offset := s - best.offset - s += best.length if offset > 65535 && s-base <= 5 && !best.rep { @@ -238,16 +384,28 @@ func encodeBlockBest(dst, src []byte) (d int) { cv = load64(src, s) continue } + if debug && nextEmit != base { + fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base) + } d += emitLiteral(dst[d:], src[nextEmit:base]) if best.rep { - if nextEmit > 0 { + if nextEmit > 0 || best.dict { + if debug { + fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. d += emitRepeat(dst[d:], offset, best.length) } else { - // First match, cannot be repeat. + // First match without dict cannot be a repeat. + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } d += emitCopy(dst[d:], offset, best.length) } } else { + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } d += emitCopy(dst[d:], offset, best.length) } repeat = offset @@ -278,6 +436,9 @@ emitRemainder: if d+len(src)-nextEmit > dstLimit { return 0 } + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } d += emitLiteral(dst[d:], src[nextEmit:]) } return d @@ -288,8 +449,9 @@ emitRemainder: // been written. 
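+// (Snappy-compatible best path: the output uses no S2 repeat codes and no
+// dictionary, so stock Snappy decoders can read it.)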
// // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBestSnappy(dst, src []byte) (d int) { // Initialize the hash tables. const ( @@ -546,6 +708,7 @@ emitRemainder: // emitCopySize returns the size to encode the offset+length // // It assumes that: +// // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 func emitCopySize(offset, length int) int { @@ -584,6 +747,7 @@ func emitCopySize(offset, length int) int { // emitCopyNoRepeatSize returns the size to encode the offset+length // // It assumes that: +// // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 func emitCopyNoRepeatSize(offset, length int) int { @@ -621,7 +785,6 @@ func emitRepeatSize(offset, length int) int { left := 0 if length > maxRepeat { left = length - maxRepeat + 4 - length = maxRepeat - 4 } if left > 0 { return 5 + emitRepeatSize(offset, left) diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go index 943215b8a..f46adb411 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_better.go +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -6,6 +6,8 @@ package s2 import ( + "bytes" + "fmt" "math/bits" ) @@ -42,8 +44,9 @@ func hash8(u uint64, h uint8) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterGo(dst, src []byte) (d int) { // sLimit is when to stop looking for offset/length copies. The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are @@ -56,7 +59,7 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { // Initialize the hash tables. const ( // Long hash matches. - lTableBits = 16 + lTableBits = 17 maxLTableSize = 1 << lTableBits // Short hash matches. @@ -97,9 +100,26 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { lTable[hashL] = uint32(s) sTable[hashS] = uint32(s) + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + // Check repeat at offset checkRep. const checkRep = 1 - if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. 
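+			// (This repeat path is currently disabled: the `if false &&` guard
+			// below keeps it compiled but unreachable.)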
+ const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { base := s + checkRep // Extend back for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { @@ -109,8 +129,8 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { d += emitLiteral(dst[d:], src[nextEmit:base]) // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep for s < len(src) { if len(src)-s < 8 { if src[s] == src[candidate] { @@ -127,28 +147,40 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { s += 8 candidate += 8 } - if nextEmit > 0 { - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - } else { - // First match, cannot be repeat. - d += emitCopy(dst[d:], repeat, s-base) - } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) nextEmit = s if s >= sLimit { goto emitRemainder } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } cv = load64(src, s) continue } - if uint32(cv) == load32(src, candidateL) { + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { break } // Check our short candidate - if uint32(cv) == load32(src, candidateS) { + if uint32(cv) == uint32(valShort) { // Try a long candidate at s+1 hashL = hash7(cv>>8, lTableBits) candidateL = int(lTable[hashL]) @@ -227,21 +259,29 @@ func encodeBlockBetterGo(dst, src []byte) (d int) { // Do we have space for more, if not bail. return 0 } - // Index match start+1 (long) and start+2 (short) + + // Index short & long index0 := base + 1 - // Index match end-2 (long) and end-1 (short) index1 := s - 2 cv0 := load64(src, index0) cv1 := load64(src, index1) - cv = load64(src, s) lTable[hash7(cv0, lTableBits)] = uint32(index0) - lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) - lTable[hash7(cv1, lTableBits)] = uint32(index1) - lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } } emitRemainder: @@ -260,8 +300,9 @@ emitRemainder: // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { // sLimit is when to stop looking for offset/length copies. 
The inputMargin // lets us use a fast path for emitLiteral in the main loop, while we are @@ -402,21 +443,649 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { // Do we have space for more, if not bail. return 0 } - // Index match start+1 (long) and start+2 (short) + + // Index short & long index0 := base + 1 - // Index match end-2 (long) and end-1 (short) index1 := s - 2 cv0 := load64(src, index0) cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + dict.initBetter() + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 0 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := len(dict.dict) - dict.repeat + + // While in dict +searchDict: + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + break searchDict + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + dictL := int(dict.betterTableLong[hashL]) + dictS := int(dict.betterTableShort[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if s != 0 { + if cv == valLong { + goto emitMatch + } + if cv == valShort { + candidateL = candidateS + goto emitMatch + } + } + + // Check dict repeat. 
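+			// (repeat >= s+4 means the repeat source starts at least four bytes
+			// inside the dictionary, keeping the 4-byte probe within dict.dict.)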
+ if repeat >= s+4 { + candidate := len(dict.dict) - repeat + s + if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + cv = load64(src, s) + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + continue + } + } + // Don't try to find match at s==0 + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + goto emitMatch + } + + // Long dict... + if uint32(cv) == load32(dict.dict, dictL) { + candidateL = dictL + goto emitDict + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + // Use our short candidate. + candidateL = candidateS + goto emitMatch + } + if uint32(cv) == load32(dict.dict, dictS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + candidateL = dictS + goto emitDict + } + cv = load64(src, nextS) + s = nextS + } + emitDict: + { + if debug { + if load32(dict.dict, candidateL) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + offset := s + (len(dict.dict)) - candidateL + + // Extend the 4-byte match as long as possible. 
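+				// (Eight bytes are compared per step while both src and the
+				// dictionary have at least eight bytes left; the XOR finds the
+				// first differing byte via TrailingZeros64.)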
+ s += 4 + candidateL += 4 + for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if repeat == offset { + if debug { + fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], offset, s-base) + } else { + // Split to ensure we don't start a copy within next block. + d += emitCopy(dst[d:], offset, 4) + d += emitRepeat(dst[d:], offset, s-base-4) + } + repeat = offset + } + if false { + // Validate match. + if s <= candidateL { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + continue + } + emitMatch: + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + if repeat == offset { + if debug { + fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s) + } + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) lTable[hash7(cv0, lTableBits)] = uint32(index0) - lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + lTable[hash7(cv1, lTableBits)] = uint32(index1) - lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. 
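+			// (hash7 keys on seven bytes, so a long-table hit that also matches
+			// the first four bytes usually extends further.)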
+ if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } } emitRemainder: diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go index 94784b82a..0d39c7b0e 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_go.go +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -4,14 +4,18 @@ package s2 import ( + "bytes" "math/bits" ) +const hasAmd64Asm = false + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlock(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { @@ -25,6 +29,7 @@ func encodeBlock(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetter(dst, src []byte) (d int) { return encodeBlockBetterGo(dst, src) @@ -35,6 +40,7 @@ func encodeBlockBetter(dst, src []byte) (d int) { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockBetterSnappy(dst, src []byte) (d int) { return encodeBlockBetterSnappyGo(dst, src) @@ -45,6 +51,7 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) { // been written. 
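+// (encode_go.go is the pure Go fallback, selected when the amd64 assembly
+// is unavailable; compare hasAmd64Asm above.)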
// // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) func encodeBlockSnappy(dst, src []byte) (d int) { if len(src) < minNonLiteralBlockSize { @@ -56,6 +63,7 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 0 <= len(lit) && len(lit) <= math.MaxUint32 func emitLiteral(dst, lit []byte) int { @@ -146,6 +154,7 @@ func emitRepeat(dst []byte, offset, length int) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 @@ -214,6 +223,7 @@ func emitCopy(dst []byte, offset, length int) int { // emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= math.MaxUint32 // 4 <= length && length <= 1 << 24 @@ -273,8 +283,8 @@ func emitCopyNoRepeat(dst []byte, offset, length int) int { // matchLen returns how many bytes match in a and b // // It assumes that: -// len(a) <= len(b) // +// len(a) <= len(b) func matchLen(a []byte, b []byte) int { b = b[:len(a)] var checked int @@ -305,3 +315,413 @@ func matchLen(a []byte, b []byte) int { } return len(a) + checked } + +func calcBlockSize(src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 13 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. 
+ const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +func calcBlockSizeSmall(src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 9 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. 
The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. 
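+				// Dead code, compiled out by the guard above; flip it to
+				// true while debugging to assert that the copy mirrors the
+				// bytes it references.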
+				a := src[base:s]
+				b := src[base-repeat : base-repeat+(s-base)]
+				if !bytes.Equal(a, b) {
+					panic("mismatch")
+				}
+			}
+
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			if d > dstLimit {
+				// Do we have space for more, if not bail.
+				return 0
+			}
+			// Check for an immediate match, otherwise start search at s+1
+			x := load64(src, s-2)
+			m2Hash := hash6(x, tableBits)
+			currHash := hash6(x>>16, tableBits)
+			candidate = int(table[currHash])
+			table[m2Hash] = uint32(s - 2)
+			table[currHash] = uint32(s)
+			if uint32(x>>16) != load32(src, candidate) {
+				cv = load64(src, s+1)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		// Bail if we exceed the maximum size.
+		if d+len(src)-nextEmit > dstLimit {
+			return 0
+		}
+		d += emitLiteralSize(src[nextEmit:])
+	}
+	return d
+}
+
+// emitLiteralSize returns the number of bytes the literal lit would occupy
+// once emitted: len(lit) plus a one to five byte length header.
+//
+// It assumes that:
+//
+//	0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralSize(lit []byte) int {
+	if len(lit) == 0 {
+		return 0
+	}
+	switch {
+	case len(lit) <= 60:
+		return len(lit) + 1
+	case len(lit) <= 1<<8:
+		return len(lit) + 2
+	case len(lit) <= 1<<16:
+		return len(lit) + 3
+	case len(lit) <= 1<<24:
+		return len(lit) + 4
+	default:
+		return len(lit) + 5
+	}
+}
+
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+	panic("cvtLZ4BlockAsm should be unreachable")
+}
+
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+	panic("cvtLZ4BlockSnappyAsm should be unreachable")
+}
+
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+	panic("cvtLZ4sBlockAsm should be unreachable")
+}
+
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+	panic("cvtLZ4sBlockSnappyAsm should be unreachable")
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
index 88f27c099..297e41501 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 package s2
 
@@ -147,11 +146,26 @@ func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
 //go:noescape
 func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
 
+// calcBlockSize returns the size of src encoded as a single non-empty block.
+// Maximum input 4294967295 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func calcBlockSize(src []byte) int
+
+// calcBlockSizeSmall returns the size of src encoded as a single non-empty block.
+// Maximum input 1024 bytes.
+// It assumes that the varint-encoded length of the decompressed bytes has already been written.
+//
+//go:noescape
+func calcBlockSizeSmall(src []byte) int
+
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 //
 // It assumes that:
-//	dst is long enough to hold the encoded bytes with margin of 0 bytes
-//	0 <= len(lit) && len(lit) <= math.MaxUint32
+//
+//	dst is long enough to hold the encoded bytes with margin of 0 bytes
+//	0 <= len(lit) && len(lit) <= math.MaxUint32
 //
 //go:noescape
 func emitLiteral(dst []byte, lit []byte) int
@@ -165,9 +179,10 @@ func emitRepeat(dst []byte, offset int, length int) int
 // emitCopy writes a copy chunk and returns the number of bytes written.
 //
 // It assumes that:
-//	dst is long enough to hold the encoded bytes
-//	1 <= offset && offset <= math.MaxUint32
-//	4 <= length && length <= 1 << 24
+//
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= math.MaxUint32
+//	4 <= length && length <= 1 << 24
 //
 //go:noescape
 func emitCopy(dst []byte, offset int, length int) int
@@ -175,9 +190,10 @@ func emitCopy(dst []byte, offset int, length int) int
 // emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
 //
 // It assumes that:
-//	dst is long enough to hold the encoded bytes
-//	1 <= offset && offset <= math.MaxUint32
-//	4 <= length && length <= 1 << 24
+//
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= math.MaxUint32
+//	4 <= length && length <= 1 << 24
 //
 //go:noescape
 func emitCopyNoRepeat(dst []byte, offset int, length int) int
@@ -185,7 +201,28 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int
 // matchLen returns how many bytes match in a and b
 //
 // It assumes that:
-//	len(a) <= len(b)
+//
+//	len(a) <= len(b)
 //
 //go:noescape
 func matchLen(a []byte, b []byte) int
+
+// cvtLZ4BlockAsm converts an LZ4 block to S2
+//
+//go:noescape
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlockAsm converts an LZ4s block to S2
+//
+//go:noescape
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4BlockSnappyAsm converts an LZ4 block to Snappy
+//
+//go:noescape
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlockSnappyAsm converts an LZ4s block to Snappy
+//
+//go:noescape
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 36915d949..12a4de3be 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -1,7 +1,6 @@
 // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm -// +build !appengine,!noasm,gc,!noasm #include "textflag.h" @@ -37,8 +36,8 @@ zero_loop_encodeBlockAsm: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -48,609 +47,601 @@ zero_loop_encodeBlockAsm: MOVQ src_base+24(FP), DX search_loop_encodeBlockAsm: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBlockAsm - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 SHLQ $0x10, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x32, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 JNE no_repeat_found_encodeBlockAsm - LEAL 1(CX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX JZ repeat_extend_back_end_encodeBlockAsm repeat_extend_back_loop_encodeBlockAsm: - CMPL DI, R8 + CMPL SI, DI JLE repeat_extend_back_end_encodeBlockAsm - MOVB -1(DX)(SI*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeBlockAsm - LEAL -1(DI), DI - DECL SI + LEAL -1(SI), SI + DECL BX JNZ repeat_extend_back_loop_encodeBlockAsm repeat_extend_back_end_encodeBlockAsm: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeBlockAsm - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeBlockAsm - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_repeat_emit_encodeBlockAsm - CMPL SI, $0x01000000 + CMPL BX, $0x01000000 JLT four_bytes_repeat_emit_encodeBlockAsm MOVB $0xfc, (AX) - MOVL SI, 1(AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP memmove_long_repeat_emit_encodeBlockAsm four_bytes_repeat_emit_encodeBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_repeat_emit_encodeBlockAsm three_bytes_repeat_emit_encodeBlockAsm: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeBlockAsm two_bytes_repeat_emit_encodeBlockAsm: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL 
memmove_repeat_emit_encodeBlockAsm JMP memmove_long_repeat_emit_encodeBlockAsm one_byte_repeat_emit_encodeBlockAsm: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_repeat_emit_encodeBlockAsm emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_repeat_emit_encodeBlockAsm: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeBlockAsm memmove_long_repeat_emit_encodeBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) ADDQ $0x20, R13 - DECQ R12 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, 
-16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeBlockAsm: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL CX, R9 - LEAQ (DX)(CX*1), R10 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX // matchLen - XORL R12, R12 - CMPL R9, $0x08 + XORL R11, R11 + CMPL R8, $0x08 JL matchlen_match4_repeat_extend_encodeBlockAsm matchlen_loopback_repeat_extend_encodeBlockAsm: - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_repeat_extend_encodeBlockAsm #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP repeat_extend_forward_end_encodeBlockAsm matchlen_loop_repeat_extend_encodeBlockAsm: - LEAL -8(R9), R9 - LEAL 8(R12), R12 - CMPL R9, $0x08 + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 JGE matchlen_loopback_repeat_extend_encodeBlockAsm JZ repeat_extend_forward_end_encodeBlockAsm matchlen_match4_repeat_extend_encodeBlockAsm: - CMPL R9, $0x04 + CMPL R8, $0x04 JL matchlen_match2_repeat_extend_encodeBlockAsm - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 JNE matchlen_match2_repeat_extend_encodeBlockAsm - SUBL $0x04, R9 - LEAL 4(R12), R12 + SUBL $0x04, R8 + LEAL 4(R11), R11 matchlen_match2_repeat_extend_encodeBlockAsm: - CMPL R9, $0x02 + CMPL R8, $0x02 JL matchlen_match1_repeat_extend_encodeBlockAsm - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 JNE matchlen_match1_repeat_extend_encodeBlockAsm - SUBL $0x02, R9 - LEAL 2(R12), R12 + SUBL $0x02, R8 + LEAL 2(R11), R11 matchlen_match1_repeat_extend_encodeBlockAsm: - CMPL R9, $0x01 + CMPL R8, $0x01 JL repeat_extend_forward_end_encodeBlockAsm - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 JNE repeat_extend_forward_end_encodeBlockAsm - LEAL 1(R12), R12 + LEAL 1(R11), R11 repeat_extend_forward_end_encodeBlockAsm: - ADDL R12, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI JZ repeat_as_copy_encodeBlockAsm // emitRepeat emit_repeat_again_match_repeat_encodeBlockAsm: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_match_repeat_encodeBlockAsm - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_match_repeat_encodeBlockAsm cant_repeat_two_offset_match_repeat_encodeBlockAsm: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_match_repeat_encodeBlockAsm - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_match_repeat_encodeBlockAsm - CMPL SI, $0x0100ffff + CMPL BX, $0x0100ffff JLT repeat_five_match_repeat_encodeBlockAsm - LEAL -16842747(SI), SI - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_repeat_encodeBlockAsm repeat_five_match_repeat_encodeBlockAsm: - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + 
SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm repeat_four_match_repeat_encodeBlockAsm: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm repeat_three_match_repeat_encodeBlockAsm: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_match_repeat_encodeBlockAsm: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_offset_match_repeat_encodeBlockAsm: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm repeat_as_copy_encodeBlockAsm: // emitCopy - CMPL DI, $0x00010000 + CMPL SI, $0x00010000 JL two_byte_offset_repeat_as_copy_encodeBlockAsm - -four_bytes_loop_back_repeat_as_copy_encodeBlockAsm: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm MOVB $0xff, (AX) - MOVL DI, 1(AX) - LEAL -64(SI), SI + MOVL SI, 1(AX) + LEAL -64(BX), BX ADDQ $0x05, AX - CMPL SI, $0x04 + CMPL BX, $0x04 JL four_bytes_remain_repeat_as_copy_encodeBlockAsm // emitRepeat emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL SI, $0x0100ffff + CMPL BX, $0x0100ffff JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy - LEAL -16842747(SI), SI - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + 
MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm - JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm four_bytes_remain_repeat_as_copy_encodeBlockAsm: - TESTL SI, SI + TESTL BX, BX JZ repeat_end_emit_encodeBlockAsm - MOVB $0x03, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVL DI, 1(AX) + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm two_byte_offset_repeat_as_copy_encodeBlockAsm: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JAE long_offset_short_repeat_as_copy_encodeBlockAsm - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(AX) - MOVL DI, R9 - SHRL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R8 - MOVB R8, (AX) + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + MOVL SI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) ADDQ $0x02, AX - SUBL $0x08, SI + SUBL $0x08, BX // emitRepeat - LEAL -4(SI), SI + LEAL -4(BX), BX JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL SI, $0x0100ffff + CMPL BX, $0x0100ffff JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - LEAL -16842747(SI), SI - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm 
long_offset_short_repeat_as_copy_encodeBlockAsm: MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX // emitRepeat emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL SI, $0x0100ffff + CMPL BX, $0x0100ffff JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short - LEAL -16842747(SI), SI - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) - ADDQ $0x02, AX + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm - JMP two_byte_offset_repeat_as_copy_encodeBlockAsm two_byte_offset_short_repeat_as_copy_encodeBlockAsm: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeBlockAsm - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeBlockAsm - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm emit_copy_three_repeat_as_copy_encodeBlockAsm: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeBlockAsm: @@ -658,16 +649,16 @@ repeat_end_emit_encodeBlockAsm: JMP search_loop_encodeBlockAsm no_repeat_found_encodeBlockAsm: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBlockAsm - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 
2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeBlockAsm - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeBlockAsm MOVL 20(SP), CX JMP search_loop_encodeBlockAsm @@ -677,549 +668,542 @@ candidate3_match_encodeBlockAsm: JMP candidate_match_encodeBlockAsm candidate2_match_encodeBlockAsm: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBlockAsm match_extend_back_loop_encodeBlockAsm: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBlockAsm - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBlockAsm LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBlockAsm JMP match_extend_back_loop_encodeBlockAsm match_extend_back_end_encodeBlockAsm: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 5(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBlockAsm MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBlockAsm: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeBlockAsm - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeBlockAsm - CMPL R8, $0x00010000 + CMPL DI, $0x00010000 JLT three_bytes_match_emit_encodeBlockAsm - CMPL R8, $0x01000000 + CMPL DI, $0x01000000 JLT four_bytes_match_emit_encodeBlockAsm MOVB $0xfc, (AX) - MOVL R8, 1(AX) + MOVL DI, 1(AX) ADDQ $0x05, AX JMP memmove_long_match_emit_encodeBlockAsm four_bytes_match_emit_encodeBlockAsm: - MOVL R8, R10 - SHRL $0x10, R10 + MOVL DI, R9 + SHRL $0x10, R9 MOVB $0xf8, (AX) - MOVW R8, 1(AX) - MOVB R10, 3(AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_encodeBlockAsm three_bytes_match_emit_encodeBlockAsm: MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBlockAsm two_bytes_match_emit_encodeBlockAsm: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeBlockAsm JMP memmove_long_match_emit_encodeBlockAsm one_byte_match_emit_encodeBlockAsm: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBlockAsm: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeBlockAsm 
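+	// The remaining size classes copy with overlapping head/tail loads:
+	// 8-16 bytes as two MOVQs, 17-32 as two MOVOUs and 33-64 as four
+	// MOVOUs, so no byte-granular tail loop is needed.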
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBlockAsm: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeBlockAsm memmove_long_match_emit_encodeBlockAsm: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeBlockAsm: match_nolit_loop_encodeBlockAsm: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeBlockAsm matchlen_loopback_match_nolit_encodeBlockAsm: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeBlockAsm #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 
#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeBlockAsm matchlen_loop_match_nolit_encodeBlockAsm: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeBlockAsm JZ match_nolit_end_encodeBlockAsm matchlen_match4_match_nolit_encodeBlockAsm: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeBlockAsm - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeBlockAsm - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeBlockAsm: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeBlockAsm - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeBlockAsm - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeBlockAsm: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeBlockAsm - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeBlockAsm - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeBlockAsm: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JL two_byte_offset_match_nolit_encodeBlockAsm - -four_bytes_loop_back_match_nolit_encodeBlockAsm: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE four_bytes_remain_match_nolit_encodeBlockAsm MOVB $0xff, (AX) - MOVL SI, 1(AX) - LEAL -64(R10), R10 + MOVL BX, 1(AX) + LEAL -64(R9), R9 ADDQ $0x05, AX - CMPL R10, $0x04 + CMPL R9, $0x04 JL four_bytes_remain_match_nolit_encodeBlockAsm // emitRepeat emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy - CMPL R10, $0x00010100 + CMPL R9, $0x00010100 JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy - CMPL R10, $0x0100ffff + CMPL R9, $0x0100ffff JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy - LEAL -16842747(R10), R10 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R9), R9 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy repeat_five_match_nolit_encodeBlockAsm_emit_copy: - LEAL -65536(R10), R10 - MOVL R10, SI + LEAL -65536(R9), R9 + MOVL R9, BX MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, SI - MOVB SI, 4(AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_four_match_nolit_encodeBlockAsm_emit_copy: - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_three_match_nolit_encodeBlockAsm_emit_copy: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP 
match_nolit_emitcopy_end_encodeBlockAsm repeat_two_match_nolit_encodeBlockAsm_emit_copy: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm - JMP four_bytes_loop_back_match_nolit_encodeBlockAsm four_bytes_remain_match_nolit_encodeBlockAsm: - TESTL R10, R10 + TESTL R9, R9 JZ match_nolit_emitcopy_end_encodeBlockAsm - MOVB $0x03, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVL SI, 1(AX) + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm two_byte_offset_match_nolit_encodeBlockAsm: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeBlockAsm - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JAE long_offset_short_match_nolit_encodeBlockAsm - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(AX) - MOVL SI, R8 - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, DI - MOVB DI, (AX) + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + MOVL BX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) ADDQ $0x02, AX - SUBL $0x08, R10 + SUBL $0x08, R9 // emitRepeat - LEAL -4(R10), R10 + LEAL -4(R9), R9 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b: - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL R10, $0x00010100 + CMPL R9, $0x00010100 JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL R10, $0x0100ffff + CMPL R9, $0x0100ffff JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b - LEAL -16842747(R10), R10 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R9), R9 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b: - LEAL -65536(R10), R10 - MOVL R10, SI + LEAL -65536(R9), R9 + MOVL R9, BX MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, SI - MOVB SI, 4(AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b: - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, 
R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm long_offset_short_match_nolit_encodeBlockAsm: MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX // emitRepeat emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy_short - CMPL R10, $0x00010100 + CMPL R9, $0x00010100 JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short - CMPL R10, $0x0100ffff + CMPL R9, $0x0100ffff JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short - LEAL -16842747(R10), R10 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R9), R9 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: - LEAL -65536(R10), R10 - MOVL R10, SI + LEAL -65536(R9), R9 + MOVL R9, BX MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, SI - MOVB SI, 4(AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm - JMP two_byte_offset_match_nolit_encodeBlockAsm two_byte_offset_short_match_nolit_encodeBlockAsm: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeBlockAsm - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeBlockAsm - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP 
match_nolit_emitcopy_end_encodeBlockAsm emit_copy_three_match_nolit_encodeBlockAsm: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeBlockAsm: CMPL CX, 8(SP) JGE emit_remainder_encodeBlockAsm - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeBlockAsm MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeBlockAsm: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeBlockAsm INCL CX JMP search_loop_encodeBlockAsm @@ -1423,8 +1407,8 @@ zero_loop_encodeBlockAsm4MB: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -1434,555 +1418,551 @@ zero_loop_encodeBlockAsm4MB: MOVQ src_base+24(FP), DX search_loop_encodeBlockAsm4MB: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBlockAsm4MB - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 SHLQ $0x10, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x32, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 JNE no_repeat_found_encodeBlockAsm4MB - LEAL 1(CX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX JZ repeat_extend_back_end_encodeBlockAsm4MB repeat_extend_back_loop_encodeBlockAsm4MB: - CMPL DI, R8 + CMPL SI, DI JLE repeat_extend_back_end_encodeBlockAsm4MB - MOVB -1(DX)(SI*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeBlockAsm4MB - LEAL -1(DI), DI - DECL SI + LEAL -1(SI), SI + DECL BX JNZ repeat_extend_back_loop_encodeBlockAsm4MB repeat_extend_back_end_encodeBlockAsm4MB: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL 
-1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeBlockAsm4MB - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeBlockAsm4MB - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_repeat_emit_encodeBlockAsm4MB - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_repeat_emit_encodeBlockAsm4MB three_bytes_repeat_emit_encodeBlockAsm4MB: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeBlockAsm4MB two_bytes_repeat_emit_encodeBlockAsm4MB: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeBlockAsm4MB JMP memmove_long_repeat_emit_encodeBlockAsm4MB one_byte_repeat_emit_encodeBlockAsm4MB: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeBlockAsm4MB: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_repeat_emit_encodeBlockAsm4MB: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB memmove_long_repeat_emit_encodeBlockAsm4MB: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 + LEAQ -32(R9)(R12*1), R10 + LEAQ 
-32(AX)(R12*1), R13 emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) ADDQ $0x20, R13 - DECQ R12 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeBlockAsm4MB: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL CX, R9 - LEAQ (DX)(CX*1), R10 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX // matchLen - XORL R12, R12 - CMPL R9, $0x08 + XORL R11, R11 + CMPL R8, $0x08 JL matchlen_match4_repeat_extend_encodeBlockAsm4MB matchlen_loopback_repeat_extend_encodeBlockAsm4MB: - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP repeat_extend_forward_end_encodeBlockAsm4MB matchlen_loop_repeat_extend_encodeBlockAsm4MB: - LEAL -8(R9), R9 - LEAL 8(R12), R12 - CMPL R9, $0x08 + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 JGE matchlen_loopback_repeat_extend_encodeBlockAsm4MB JZ repeat_extend_forward_end_encodeBlockAsm4MB matchlen_match4_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x04 + CMPL R8, $0x04 JL matchlen_match2_repeat_extend_encodeBlockAsm4MB - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB - SUBL $0x04, R9 - LEAL 4(R12), R12 + SUBL $0x04, R8 + LEAL 4(R11), R11 matchlen_match2_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x02 + CMPL R8, $0x02 JL matchlen_match1_repeat_extend_encodeBlockAsm4MB - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB - SUBL $0x02, R9 - LEAL 2(R12), R12 + SUBL $0x02, R8 + LEAL 2(R11), R11 matchlen_match1_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x01 + CMPL R8, $0x01 JL repeat_extend_forward_end_encodeBlockAsm4MB - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 JNE repeat_extend_forward_end_encodeBlockAsm4MB - LEAL 1(R12), R12 + LEAL 1(R11), R11 repeat_extend_forward_end_encodeBlockAsm4MB: - ADDL R12, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI JZ repeat_as_copy_encodeBlockAsm4MB // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE 
repeat_two_match_repeat_encodeBlockAsm4MB - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_match_repeat_encodeBlockAsm4MB cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_match_repeat_encodeBlockAsm4MB - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_match_repeat_encodeBlockAsm4MB - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_four_match_repeat_encodeBlockAsm4MB: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_three_match_repeat_encodeBlockAsm4MB: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_match_repeat_encodeBlockAsm4MB: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_offset_match_repeat_encodeBlockAsm4MB: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_as_copy_encodeBlockAsm4MB: // emitCopy - CMPL DI, $0x00010000 + CMPL SI, $0x00010000 JL two_byte_offset_repeat_as_copy_encodeBlockAsm4MB - -four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB MOVB $0xff, (AX) - MOVL DI, 1(AX) - LEAL -64(SI), SI + MOVL SI, 1(AX) + LEAL -64(BX), BX ADDQ $0x05, AX - CMPL SI, $0x04 + CMPL BX, $0x04 JL four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL 
$0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB - JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: - TESTL SI, SI + TESTL BX, BX JZ repeat_end_emit_encodeBlockAsm4MB - MOVB $0x03, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVL DI, 1(AX) + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm4MB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (AX) + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX - SUBL $0x08, SI + SUBL $0x08, BX // emitRepeat - LEAL -4(SI), SI + LEAL -4(BX), BX JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB long_offset_short_repeat_as_copy_encodeBlockAsm4MB: MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX 
ADDQ $0x03, AX // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - CMPL SI, $0x00010100 + CMPL BX, $0x00010100 JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - LEAL -65536(SI), SI - MOVL SI, DI + LEAL -65536(BX), BX + MOVL BX, SI MOVW $0x001d, (AX) - MOVW SI, 2(AX) - SARL $0x10, DI - MOVB DI, 4(AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB - JMP two_byte_offset_repeat_as_copy_encodeBlockAsm4MB two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm4MB emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeBlockAsm4MB: @@ -1990,16 +1970,16 @@ repeat_end_emit_encodeBlockAsm4MB: JMP search_loop_encodeBlockAsm4MB no_repeat_found_encodeBlockAsm4MB: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBlockAsm4MB - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeBlockAsm4MB - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeBlockAsm4MB MOVL 20(SP), CX JMP search_loop_encodeBlockAsm4MB @@ -2009,506 +1989,502 @@ candidate3_match_encodeBlockAsm4MB: JMP candidate_match_encodeBlockAsm4MB candidate2_match_encodeBlockAsm4MB: - MOVL R9, 24(SP)(R10*4) 
+ MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBlockAsm4MB: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBlockAsm4MB match_extend_back_loop_encodeBlockAsm4MB: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBlockAsm4MB - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBlockAsm4MB LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBlockAsm4MB JMP match_extend_back_loop_encodeBlockAsm4MB match_extend_back_end_encodeBlockAsm4MB: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 4(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 4(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBlockAsm4MB MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBlockAsm4MB: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeBlockAsm4MB - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeBlockAsm4MB - CMPL R8, $0x00010000 + CMPL DI, $0x00010000 JLT three_bytes_match_emit_encodeBlockAsm4MB - MOVL R8, R10 - SHRL $0x10, R10 + MOVL DI, R9 + SHRL $0x10, R9 MOVB $0xf8, (AX) - MOVW R8, 1(AX) - MOVB R10, 3(AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_encodeBlockAsm4MB three_bytes_match_emit_encodeBlockAsm4MB: MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBlockAsm4MB two_bytes_match_emit_encodeBlockAsm4MB: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeBlockAsm4MB JMP memmove_long_match_emit_encodeBlockAsm4MB one_byte_match_emit_encodeBlockAsm4MB: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBlockAsm4MB: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeBlockAsm4MB emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm4MB emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm4MB emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 
- MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBlockAsm4MB: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeBlockAsm4MB memmove_long_match_emit_encodeBlockAsm4MB: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeBlockAsm4MB: match_nolit_loop_encodeBlockAsm4MB: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeBlockAsm4MB matchlen_loopback_match_nolit_encodeBlockAsm4MB: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeBlockAsm4MB #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeBlockAsm4MB matchlen_loop_match_nolit_encodeBlockAsm4MB: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeBlockAsm4MB JZ match_nolit_end_encodeBlockAsm4MB matchlen_match4_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeBlockAsm4MB - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE 
matchlen_match2_match_nolit_encodeBlockAsm4MB - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeBlockAsm4MB - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeBlockAsm4MB - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeBlockAsm4MB - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeBlockAsm4MB - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeBlockAsm4MB: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JL two_byte_offset_match_nolit_encodeBlockAsm4MB - -four_bytes_loop_back_match_nolit_encodeBlockAsm4MB: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE four_bytes_remain_match_nolit_encodeBlockAsm4MB MOVB $0xff, (AX) - MOVL SI, 1(AX) - LEAL -64(R10), R10 + MOVL BX, 1(AX) + LEAL -64(R9), R9 ADDQ $0x05, AX - CMPL R10, $0x04 + CMPL R9, $0x04 JL four_bytes_remain_match_nolit_encodeBlockAsm4MB // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy - CMPL R10, $0x00010100 + CMPL R9, $0x00010100 JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy - LEAL -65536(R10), R10 - MOVL R10, SI + LEAL -65536(R9), R9 + MOVL R9, BX MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, SI - MOVB SI, 4(AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - JMP four_bytes_loop_back_match_nolit_encodeBlockAsm4MB four_bytes_remain_match_nolit_encodeBlockAsm4MB: - TESTL R10, R10 + TESTL R9, R9 JZ match_nolit_emitcopy_end_encodeBlockAsm4MB - MOVB $0x03, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVL SI, 1(AX) + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL 
BX, 1(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB two_byte_offset_match_nolit_encodeBlockAsm4MB: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeBlockAsm4MB - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JAE long_offset_short_match_nolit_encodeBlockAsm4MB - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (AX) + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX - SUBL $0x08, R10 + SUBL $0x08, R9 // emitRepeat - LEAL -4(R10), R10 + LEAL -4(R9), R9 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - CMPL R10, $0x00010100 + CMPL R9, $0x00010100 JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - LEAL -65536(R10), R10 - MOVL R10, SI + LEAL -65536(R9), R9 + MOVL R9, BX MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, SI - MOVB SI, 4(AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB long_offset_short_match_nolit_encodeBlockAsm4MB: MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short - CMPL R10, $0x00010100 + CMPL R9, $0x00010100 JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short - LEAL 
-65536(R10), R10 - MOVL R10, SI + LEAL -65536(R9), R9 + MOVL R9, BX MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, SI - MOVB SI, 4(AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - JMP two_byte_offset_match_nolit_encodeBlockAsm4MB two_byte_offset_short_match_nolit_encodeBlockAsm4MB: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeBlockAsm4MB - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeBlockAsm4MB - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm4MB emit_copy_three_match_nolit_encodeBlockAsm4MB: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeBlockAsm4MB: CMPL CX, 8(SP) JGE emit_remainder_encodeBlockAsm4MB - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeBlockAsm4MB MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeBlockAsm4MB: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeBlockAsm4MB INCL CX JMP search_loop_encodeBlockAsm4MB @@ -2704,8 +2680,8 @@ zero_loop_encodeBlockAsm12B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -2715,428 +2691,426 @@ zero_loop_encodeBlockAsm12B: MOVQ src_base+24(FP), DX search_loop_encodeBlockAsm12B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE 
emit_remainder_encodeBlockAsm12B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x18, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - SHLQ $0x18, R11 - IMULQ R9, R11 - SHRQ $0x34, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 SHLQ $0x18, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x34, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 JNE no_repeat_found_encodeBlockAsm12B - LEAL 1(CX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX JZ repeat_extend_back_end_encodeBlockAsm12B repeat_extend_back_loop_encodeBlockAsm12B: - CMPL DI, R8 + CMPL SI, DI JLE repeat_extend_back_end_encodeBlockAsm12B - MOVB -1(DX)(SI*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeBlockAsm12B - LEAL -1(DI), DI - DECL SI + LEAL -1(SI), SI + DECL BX JNZ repeat_extend_back_loop_encodeBlockAsm12B repeat_extend_back_end_encodeBlockAsm12B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeBlockAsm12B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeBlockAsm12B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeBlockAsm12B two_bytes_repeat_emit_encodeBlockAsm12B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeBlockAsm12B JMP memmove_long_repeat_emit_encodeBlockAsm12B one_byte_repeat_emit_encodeBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP 
memmove_end_copy_repeat_emit_encodeBlockAsm12B emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_repeat_emit_encodeBlockAsm12B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeBlockAsm12B memmove_long_repeat_emit_encodeBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) ADDQ $0x20, R13 - DECQ R12 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeBlockAsm12B: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL CX, R9 - LEAQ (DX)(CX*1), R10 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX // matchLen - XORL R12, R12 - CMPL R9, $0x08 + XORL R11, R11 + CMPL R8, $0x08 JL matchlen_match4_repeat_extend_encodeBlockAsm12B matchlen_loopback_repeat_extend_encodeBlockAsm12B: - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_repeat_extend_encodeBlockAsm12B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, 
R10 + LEAL (R11)(R10*1), R11 JMP repeat_extend_forward_end_encodeBlockAsm12B matchlen_loop_repeat_extend_encodeBlockAsm12B: - LEAL -8(R9), R9 - LEAL 8(R12), R12 - CMPL R9, $0x08 + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 JGE matchlen_loopback_repeat_extend_encodeBlockAsm12B JZ repeat_extend_forward_end_encodeBlockAsm12B matchlen_match4_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x04 + CMPL R8, $0x04 JL matchlen_match2_repeat_extend_encodeBlockAsm12B - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 JNE matchlen_match2_repeat_extend_encodeBlockAsm12B - SUBL $0x04, R9 - LEAL 4(R12), R12 + SUBL $0x04, R8 + LEAL 4(R11), R11 matchlen_match2_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x02 + CMPL R8, $0x02 JL matchlen_match1_repeat_extend_encodeBlockAsm12B - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 JNE matchlen_match1_repeat_extend_encodeBlockAsm12B - SUBL $0x02, R9 - LEAL 2(R12), R12 + SUBL $0x02, R8 + LEAL 2(R11), R11 matchlen_match1_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x01 + CMPL R8, $0x01 JL repeat_extend_forward_end_encodeBlockAsm12B - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 JNE repeat_extend_forward_end_encodeBlockAsm12B - LEAL 1(R12), R12 + LEAL 1(R11), R11 repeat_extend_forward_end_encodeBlockAsm12B: - ADDL R12, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI JZ repeat_as_copy_encodeBlockAsm12B // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_match_repeat_encodeBlockAsm12B - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_match_repeat_encodeBlockAsm12B cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_match_repeat_encodeBlockAsm12B - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_three_match_repeat_encodeBlockAsm12B: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_two_match_repeat_encodeBlockAsm12B: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_two_offset_match_repeat_encodeBlockAsm12B: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_as_copy_encodeBlockAsm12B: // emitCopy -two_byte_offset_repeat_as_copy_encodeBlockAsm12B: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (AX) + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX - SUBL $0x08, SI + SUBL $0x08, BX // emitRepeat - LEAL 
-4(SI), SI + LEAL -4(BX), BX JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B long_offset_short_repeat_as_copy_encodeBlockAsm12B: MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B - JMP two_byte_offset_repeat_as_copy_encodeBlockAsm12B two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B - CMPL DI, $0x00000800 + CMPL SI, 
$0x00000800 JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm12B emit_copy_three_repeat_as_copy_encodeBlockAsm12B: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeBlockAsm12B: @@ -3144,16 +3118,16 @@ repeat_end_emit_encodeBlockAsm12B: JMP search_loop_encodeBlockAsm12B no_repeat_found_encodeBlockAsm12B: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBlockAsm12B - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeBlockAsm12B - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeBlockAsm12B MOVL 20(SP), CX JMP search_loop_encodeBlockAsm12B @@ -3163,391 +3137,389 @@ candidate3_match_encodeBlockAsm12B: JMP candidate_match_encodeBlockAsm12B candidate2_match_encodeBlockAsm12B: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBlockAsm12B match_extend_back_loop_encodeBlockAsm12B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBlockAsm12B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBlockAsm12B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBlockAsm12B JMP match_extend_back_loop_encodeBlockAsm12B match_extend_back_end_encodeBlockAsm12B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBlockAsm12B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBlockAsm12B: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeBlockAsm12B - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeBlockAsm12B MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBlockAsm12B two_bytes_match_emit_encodeBlockAsm12B: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeBlockAsm12B JMP memmove_long_match_emit_encodeBlockAsm12B one_byte_match_emit_encodeBlockAsm12B: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBlockAsm12B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE 
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeBlockAsm12B emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm12B emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm12B emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBlockAsm12B: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeBlockAsm12B memmove_long_match_emit_encodeBlockAsm12B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeBlockAsm12B: match_nolit_loop_encodeBlockAsm12B: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL 
R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeBlockAsm12B matchlen_loopback_match_nolit_encodeBlockAsm12B: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeBlockAsm12B #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeBlockAsm12B matchlen_loop_match_nolit_encodeBlockAsm12B: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeBlockAsm12B JZ match_nolit_end_encodeBlockAsm12B matchlen_match4_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeBlockAsm12B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeBlockAsm12B - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeBlockAsm12B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeBlockAsm12B - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeBlockAsm12B - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeBlockAsm12B - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeBlockAsm12B: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy -two_byte_offset_match_nolit_encodeBlockAsm12B: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeBlockAsm12B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JAE long_offset_short_match_nolit_encodeBlockAsm12B - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (AX) + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX - SUBL $0x08, R10 + SUBL $0x08, R9 // emitRepeat - LEAL -4(R10), R10 + LEAL -4(R9), R9 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - SHLL $0x02, R10 - 
ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B long_offset_short_match_nolit_encodeBlockAsm12B: MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B - JMP two_byte_offset_match_nolit_encodeBlockAsm12B two_byte_offset_short_match_nolit_encodeBlockAsm12B: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeBlockAsm12B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeBlockAsm12B - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm12B emit_copy_three_match_nolit_encodeBlockAsm12B: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeBlockAsm12B: CMPL CX, 8(SP) JGE emit_remainder_encodeBlockAsm12B - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeBlockAsm12B MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeBlockAsm12B: - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x18, R8 - IMULQ R9, R8 - SHRQ $0x34, R8 - SHLQ $0x18, SI - IMULQ R9, SI - SHRQ $0x34, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL 
(DX)(SI*1), DI + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x18, DI + IMULQ R8, DI + SHRQ $0x34, DI + SHLQ $0x18, BX + IMULQ R8, BX + SHRQ $0x34, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeBlockAsm12B INCL CX JMP search_loop_encodeBlockAsm12B @@ -3732,8 +3704,8 @@ zero_loop_encodeBlockAsm10B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -3743,428 +3715,426 @@ zero_loop_encodeBlockAsm10B: MOVQ src_base+24(FP), DX search_loop_encodeBlockAsm10B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBlockAsm10B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x36, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 SHLQ $0x20, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x36, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 JNE no_repeat_found_encodeBlockAsm10B - LEAL 1(CX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX JZ repeat_extend_back_end_encodeBlockAsm10B repeat_extend_back_loop_encodeBlockAsm10B: - CMPL DI, R8 + CMPL SI, DI JLE repeat_extend_back_end_encodeBlockAsm10B - MOVB -1(DX)(SI*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeBlockAsm10B - LEAL -1(DI), DI - DECL SI + LEAL -1(SI), SI + DECL BX JNZ repeat_extend_back_loop_encodeBlockAsm10B repeat_extend_back_end_encodeBlockAsm10B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeBlockAsm10B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeBlockAsm10B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeBlockAsm10B two_bytes_repeat_emit_encodeBlockAsm10B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeBlockAsm10B JMP memmove_long_repeat_emit_encodeBlockAsm10B one_byte_repeat_emit_encodeBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeBlockAsm10B: - LEAQ 
(AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_repeat_emit_encodeBlockAsm10B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeBlockAsm10B memmove_long_repeat_emit_encodeBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) ADDQ $0x20, R13 - DECQ R12 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeBlockAsm10B: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI 
- MOVQ src_len+32(FP), R9 - SUBL CX, R9 - LEAQ (DX)(CX*1), R10 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX // matchLen - XORL R12, R12 - CMPL R9, $0x08 + XORL R11, R11 + CMPL R8, $0x08 JL matchlen_match4_repeat_extend_encodeBlockAsm10B matchlen_loopback_repeat_extend_encodeBlockAsm10B: - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_repeat_extend_encodeBlockAsm10B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP repeat_extend_forward_end_encodeBlockAsm10B matchlen_loop_repeat_extend_encodeBlockAsm10B: - LEAL -8(R9), R9 - LEAL 8(R12), R12 - CMPL R9, $0x08 + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 JGE matchlen_loopback_repeat_extend_encodeBlockAsm10B JZ repeat_extend_forward_end_encodeBlockAsm10B matchlen_match4_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x04 + CMPL R8, $0x04 JL matchlen_match2_repeat_extend_encodeBlockAsm10B - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 JNE matchlen_match2_repeat_extend_encodeBlockAsm10B - SUBL $0x04, R9 - LEAL 4(R12), R12 + SUBL $0x04, R8 + LEAL 4(R11), R11 matchlen_match2_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x02 + CMPL R8, $0x02 JL matchlen_match1_repeat_extend_encodeBlockAsm10B - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 JNE matchlen_match1_repeat_extend_encodeBlockAsm10B - SUBL $0x02, R9 - LEAL 2(R12), R12 + SUBL $0x02, R8 + LEAL 2(R11), R11 matchlen_match1_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x01 + CMPL R8, $0x01 JL repeat_extend_forward_end_encodeBlockAsm10B - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 JNE repeat_extend_forward_end_encodeBlockAsm10B - LEAL 1(R12), R12 + LEAL 1(R11), R11 repeat_extend_forward_end_encodeBlockAsm10B: - ADDL R12, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI JZ repeat_as_copy_encodeBlockAsm10B // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_match_repeat_encodeBlockAsm10B - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_match_repeat_encodeBlockAsm10B cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_match_repeat_encodeBlockAsm10B - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_three_match_repeat_encodeBlockAsm10B: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_two_match_repeat_encodeBlockAsm10B: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_two_offset_match_repeat_encodeBlockAsm10B: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + 
SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_as_copy_encodeBlockAsm10B: // emitCopy -two_byte_offset_repeat_as_copy_encodeBlockAsm10B: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (AX) + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX - SUBL $0x08, SI + SUBL $0x08, BX // emitRepeat - LEAL -4(SI), SI + LEAL -4(BX), BX JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B long_offset_short_repeat_as_copy_encodeBlockAsm10B: MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - CMPL R8, $0x0c + CMPL DI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL 
$0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B - JMP two_byte_offset_repeat_as_copy_encodeBlockAsm10B two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm10B emit_copy_three_repeat_as_copy_encodeBlockAsm10B: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeBlockAsm10B: @@ -4172,16 +4142,16 @@ repeat_end_emit_encodeBlockAsm10B: JMP search_loop_encodeBlockAsm10B no_repeat_found_encodeBlockAsm10B: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBlockAsm10B - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeBlockAsm10B - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeBlockAsm10B MOVL 20(SP), CX JMP search_loop_encodeBlockAsm10B @@ -4191,391 +4161,389 @@ candidate3_match_encodeBlockAsm10B: JMP candidate_match_encodeBlockAsm10B candidate2_match_encodeBlockAsm10B: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBlockAsm10B match_extend_back_loop_encodeBlockAsm10B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBlockAsm10B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBlockAsm10B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBlockAsm10B JMP match_extend_back_loop_encodeBlockAsm10B match_extend_back_end_encodeBlockAsm10B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBlockAsm10B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBlockAsm10B: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeBlockAsm10B - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeBlockAsm10B MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBlockAsm10B 
two_bytes_match_emit_encodeBlockAsm10B: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeBlockAsm10B JMP memmove_long_match_emit_encodeBlockAsm10B one_byte_match_emit_encodeBlockAsm10B: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBlockAsm10B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeBlockAsm10B emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm10B emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm10B emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBlockAsm10B: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeBlockAsm10B memmove_long_match_emit_encodeBlockAsm10B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, 
R11 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeBlockAsm10B: match_nolit_loop_encodeBlockAsm10B: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeBlockAsm10B matchlen_loopback_match_nolit_encodeBlockAsm10B: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeBlockAsm10B #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeBlockAsm10B matchlen_loop_match_nolit_encodeBlockAsm10B: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeBlockAsm10B JZ match_nolit_end_encodeBlockAsm10B matchlen_match4_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeBlockAsm10B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeBlockAsm10B - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeBlockAsm10B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeBlockAsm10B - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeBlockAsm10B - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeBlockAsm10B - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeBlockAsm10B: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy -two_byte_offset_match_nolit_encodeBlockAsm10B: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeBlockAsm10B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JAE long_offset_short_match_nolit_encodeBlockAsm10B - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (AX) + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX - SUBL $0x08, R10 + SUBL $0x08, R9 // emitRepeat - LEAL -4(R10), R10 + LEAL -4(R9), R9 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT 
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B long_offset_short_match_nolit_encodeBlockAsm10B: MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B - JMP two_byte_offset_match_nolit_encodeBlockAsm10B two_byte_offset_short_match_nolit_encodeBlockAsm10B: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeBlockAsm10B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeBlockAsm10B - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm10B 
emit_copy_three_match_nolit_encodeBlockAsm10B: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeBlockAsm10B: CMPL CX, 8(SP) JGE emit_remainder_encodeBlockAsm10B - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeBlockAsm10B MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeBlockAsm10B: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x36, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x36, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x36, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x36, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeBlockAsm10B INCL CX JMP search_loop_encodeBlockAsm10B @@ -4760,8 +4728,8 @@ zero_loop_encodeBlockAsm8B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -4771,414 +4739,412 @@ zero_loop_encodeBlockAsm8B: MOVQ src_base+24(FP), DX search_loop_encodeBlockAsm8B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBlockAsm8B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x38, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x38, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 SHLQ $0x20, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x38, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 JNE no_repeat_found_encodeBlockAsm8B - LEAL 1(CX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX JZ repeat_extend_back_end_encodeBlockAsm8B repeat_extend_back_loop_encodeBlockAsm8B: - CMPL DI, R8 + CMPL SI, DI JLE repeat_extend_back_end_encodeBlockAsm8B - MOVB -1(DX)(SI*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeBlockAsm8B - LEAL -1(DI), DI - DECL SI + LEAL -1(SI), SI + DECL BX JNZ repeat_extend_back_loop_encodeBlockAsm8B repeat_extend_back_end_encodeBlockAsm8B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ 
(DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeBlockAsm8B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeBlockAsm8B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeBlockAsm8B two_bytes_repeat_emit_encodeBlockAsm8B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeBlockAsm8B JMP memmove_long_repeat_emit_encodeBlockAsm8B one_byte_repeat_emit_encodeBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeBlockAsm8B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_repeat_emit_encodeBlockAsm8B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeBlockAsm8B memmove_long_repeat_emit_encodeBlockAsm8B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) ADDQ $0x20, R13 - DECQ R12 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back 
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 - JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (AX) + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeBlockAsm8B: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL CX, R9 - LEAQ (DX)(CX*1), R10 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX // matchLen - XORL R12, R12 - CMPL R9, $0x08 + XORL R11, R11 + CMPL R8, $0x08 JL matchlen_match4_repeat_extend_encodeBlockAsm8B matchlen_loopback_repeat_extend_encodeBlockAsm8B: - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_repeat_extend_encodeBlockAsm8B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP repeat_extend_forward_end_encodeBlockAsm8B matchlen_loop_repeat_extend_encodeBlockAsm8B: - LEAL -8(R9), R9 - LEAL 8(R12), R12 - CMPL R9, $0x08 + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 JGE matchlen_loopback_repeat_extend_encodeBlockAsm8B JZ repeat_extend_forward_end_encodeBlockAsm8B matchlen_match4_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x04 + CMPL R8, $0x04 JL matchlen_match2_repeat_extend_encodeBlockAsm8B - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 JNE matchlen_match2_repeat_extend_encodeBlockAsm8B - SUBL $0x04, R9 - LEAL 4(R12), R12 + SUBL $0x04, R8 + LEAL 4(R11), R11 matchlen_match2_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x02 + CMPL R8, $0x02 JL matchlen_match1_repeat_extend_encodeBlockAsm8B - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 JNE matchlen_match1_repeat_extend_encodeBlockAsm8B - SUBL $0x02, R9 - LEAL 2(R12), R12 + SUBL $0x02, R8 + LEAL 2(R11), R11 matchlen_match1_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x01 + CMPL R8, $0x01 JL repeat_extend_forward_end_encodeBlockAsm8B - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 JNE repeat_extend_forward_end_encodeBlockAsm8B - LEAL 1(R12), R12 + LEAL 1(R11), R11 repeat_extend_forward_end_encodeBlockAsm8B: - ADDL R12, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI JZ repeat_as_copy_encodeBlockAsm8B // emitRepeat - MOVL SI, DI - LEAL -4(SI), SI - CMPL DI, $0x08 + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 JLE repeat_two_match_repeat_encodeBlockAsm8B - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_match_repeat_encodeBlockAsm8B - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW 
SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm8B repeat_three_match_repeat_encodeBlockAsm8B: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm8B repeat_two_match_repeat_encodeBlockAsm8B: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B repeat_as_copy_encodeBlockAsm8B: // emitCopy -two_byte_offset_repeat_as_copy_encodeBlockAsm8B: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (AX) + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX - SUBL $0x08, SI + SUBL $0x08, BX // emitRepeat - LEAL -4(SI), SI + LEAL -4(BX), BX JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - MOVL SI, DI - LEAL -4(SI), SI - CMPL DI, $0x08 + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm8B repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm8B repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B long_offset_short_repeat_as_copy_encodeBlockAsm8B: MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX // emitRepeat - MOVL SI, DI - LEAL -4(SI), SI - CMPL DI, $0x08 + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short - CMPL DI, $0x0c + CMPL SI, $0x0c JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: - CMPL SI, $0x00000104 + CMPL BX, $0x00000104 JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short - LEAL -256(SI), SI + LEAL -256(BX), BX MOVW $0x0019, (AX) - MOVW SI, 2(AX) + MOVW BX, 2(AX) ADDQ $0x04, AX JMP repeat_end_emit_encodeBlockAsm8B 
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: - LEAL -4(SI), SI + LEAL -4(BX), BX MOVW $0x0015, (AX) - MOVB SI, 2(AX) + MOVB BL, 2(AX) ADDQ $0x03, AX JMP repeat_end_emit_encodeBlockAsm8B repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (AX) + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(AX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B - JMP two_byte_offset_repeat_as_copy_encodeBlockAsm8B two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeBlockAsm8B - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeBlockAsm8B emit_copy_three_repeat_as_copy_encodeBlockAsm8B: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeBlockAsm8B: @@ -5186,16 +5152,16 @@ repeat_end_emit_encodeBlockAsm8B: JMP search_loop_encodeBlockAsm8B no_repeat_found_encodeBlockAsm8B: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBlockAsm8B - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeBlockAsm8B - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeBlockAsm8B MOVL 20(SP), CX JMP search_loop_encodeBlockAsm8B @@ -5205,381 +5171,379 @@ candidate3_match_encodeBlockAsm8B: JMP candidate_match_encodeBlockAsm8B candidate2_match_encodeBlockAsm8B: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBlockAsm8B match_extend_back_loop_encodeBlockAsm8B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBlockAsm8B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBlockAsm8B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBlockAsm8B JMP match_extend_back_loop_encodeBlockAsm8B match_extend_back_end_encodeBlockAsm8B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBlockAsm8B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBlockAsm8B: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeBlockAsm8B - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeBlockAsm8B MOVB 
$0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBlockAsm8B two_bytes_match_emit_encodeBlockAsm8B: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeBlockAsm8B JMP memmove_long_match_emit_encodeBlockAsm8B one_byte_match_emit_encodeBlockAsm8B: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBlockAsm8B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeBlockAsm8B emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm8B emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBlockAsm8B emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBlockAsm8B: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeBlockAsm8B memmove_long_match_emit_encodeBlockAsm8B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + 
MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeBlockAsm8B: match_nolit_loop_encodeBlockAsm8B: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeBlockAsm8B matchlen_loopback_match_nolit_encodeBlockAsm8B: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeBlockAsm8B #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeBlockAsm8B matchlen_loop_match_nolit_encodeBlockAsm8B: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeBlockAsm8B JZ match_nolit_end_encodeBlockAsm8B matchlen_match4_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeBlockAsm8B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeBlockAsm8B - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeBlockAsm8B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeBlockAsm8B - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeBlockAsm8B - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeBlockAsm8B - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeBlockAsm8B: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy -two_byte_offset_match_nolit_encodeBlockAsm8B: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeBlockAsm8B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JAE long_offset_short_match_nolit_encodeBlockAsm8B - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (AX) + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX - SUBL $0x08, R10 + SUBL $0x08, R9 // emitRepeat - LEAL -4(R10), R10 + LEAL -4(R9), R9 JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - MOVL R10, SI - LEAL -4(R10), R10 - CMPL SI, $0x08 + MOVL R9, BX + LEAL -4(R9), R9 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b 
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B long_offset_short_match_nolit_encodeBlockAsm8B: MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX // emitRepeat - MOVL R10, SI - LEAL -4(R10), R10 - CMPL SI, $0x08 + MOVL R9, BX + LEAL -4(R9), R9 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: - CMPL R10, $0x00000104 + CMPL R9, $0x00000104 JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short - LEAL -256(R10), R10 + LEAL -256(R9), R9 MOVW $0x0019, (AX) - MOVW R10, 2(AX) + MOVW R9, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: - LEAL -4(R10), R10 + LEAL -4(R9), R9 MOVW $0x0015, (AX) - MOVB R10, 2(AX) + MOVB R9, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(AX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B - JMP two_byte_offset_match_nolit_encodeBlockAsm8B two_byte_offset_short_match_nolit_encodeBlockAsm8B: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeBlockAsm8B - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBlockAsm8B emit_copy_three_match_nolit_encodeBlockAsm8B: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeBlockAsm8B: CMPL CX, 8(SP) JGE emit_remainder_encodeBlockAsm8B - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeBlockAsm8B MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeBlockAsm8B: - MOVQ $0x9e3779b1, 
R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x38, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x38, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x38, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x38, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeBlockAsm8B INCL CX JMP search_loop_encodeBlockAsm8B @@ -5743,9 +5707,9 @@ emit_literal_done_emit_remainder_encodeBlockAsm8B: // func encodeBetterBlockAsm(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm(SB), $327704-56 +TEXT ·encodeBetterBlockAsm(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -5764,8 +5728,8 @@ zero_loop_encodeBetterBlockAsm: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -6(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -5775,808 +5739,810 @@ zero_loop_encodeBetterBlockAsm: MOVQ src_base+24(FP), DX search_loop_encodeBetterBlockAsm: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x07, SI - CMPL SI, $0x63 + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 JLE check_maxskip_ok_encodeBetterBlockAsm - LEAL 100(CX), SI + LEAL 100(CX), BX JMP check_maxskip_cont_encodeBetterBlockAsm check_maxskip_ok_encodeBetterBlockAsm: - LEAL 1(CX)(SI*1), SI + LEAL 1(CX)(BX*1), BX check_maxskip_cont_encodeBetterBlockAsm: - CMPL SI, 8(SP) + CMPL BX, 8(SP) JGE emit_remainder_encodeBetterBlockAsm - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x00cf1bbcdcbfa563, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeBetterBlockAsm - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm + +no_short_found_encodeBetterBlockAsm: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm candidateS_match_encodeBetterBlockAsm: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBetterBlockAsm DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBetterBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX 
JZ match_extend_back_end_encodeBetterBlockAsm match_extend_back_loop_encodeBetterBlockAsm: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBetterBlockAsm - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBetterBlockAsm LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBetterBlockAsm JMP match_extend_back_loop_encodeBetterBlockAsm match_extend_back_end_encodeBetterBlockAsm: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 5(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBetterBlockAsm MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBetterBlockAsm: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeBetterBlockAsm matchlen_loopback_match_nolit_encodeBetterBlockAsm: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeBetterBlockAsm #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeBetterBlockAsm matchlen_loop_match_nolit_encodeBetterBlockAsm: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm JZ match_nolit_end_encodeBetterBlockAsm matchlen_match4_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeBetterBlockAsm - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeBetterBlockAsm - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeBetterBlockAsm - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeBetterBlockAsm - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeBetterBlockAsm: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - CMPL 16(SP), R8 + CMPL 16(SP), DI JEQ match_is_repeat_encodeBetterBlockAsm - CMPL R12, $0x01 + CMPL R11, $0x01 JG match_length_ok_encodeBetterBlockAsm - CMPL R8, $0x0000ffff + CMPL DI, $0x0000ffff JLE match_length_ok_encodeBetterBlockAsm MOVL 20(SP), CX INCL CX JMP search_loop_encodeBetterBlockAsm match_length_ok_encodeBetterBlockAsm: - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeBetterBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, 
$0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeBetterBlockAsm - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeBetterBlockAsm - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_match_emit_encodeBetterBlockAsm - CMPL SI, $0x01000000 + CMPL BX, $0x01000000 JLT four_bytes_match_emit_encodeBetterBlockAsm MOVB $0xfc, (AX) - MOVL SI, 1(AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP memmove_long_match_emit_encodeBetterBlockAsm four_bytes_match_emit_encodeBetterBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_encodeBetterBlockAsm three_bytes_match_emit_encodeBetterBlockAsm: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBetterBlockAsm two_bytes_match_emit_encodeBetterBlockAsm: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeBetterBlockAsm JMP memmove_long_match_emit_encodeBetterBlockAsm one_byte_match_emit_encodeBetterBlockAsm: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBetterBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBetterBlockAsm: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeBetterBlockAsm 
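[Editor's note] The matchlen_* blocks in this hunk are the generated form of the usual eight-bytes-at-a-time match scan: XOR two 64-bit loads, and once the result is non-zero, TZCNTQ (or BSFQ on pre-BMI CPUs, per the GOAMD64_v3 guard) finds the first differing bit and SARQ $0x03 converts bits to bytes. A minimal Go sketch of the same technique, assuming nothing beyond the standard library; the function name and test harness are mine, not part of the generated code:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the number of leading bytes shared by a and b,
// comparing eight bytes at a time as the assembly does.
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if x != 0 {
			// The lowest set bit marks the first differing byte
			// (TZCNTQ/BSFQ); >>3 turns the bit index into bytes.
			return n + bits.TrailingZeros64(x)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Tail, mirroring the matchlen_match4/2/1 fallthrough blocks.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("hello, worlds"), []byte("hello, world!"))) // 12
}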
memmove_long_match_emit_encodeBetterBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeBetterBlockAsm: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy - CMPL R8, $0x00010000 + CMPL DI, $0x00010000 JL two_byte_offset_match_nolit_encodeBetterBlockAsm - -four_bytes_loop_back_match_nolit_encodeBetterBlockAsm: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm MOVB $0xff, (AX) - MOVL R8, 1(AX) - LEAL -64(R12), R12 + MOVL DI, 1(AX) + LEAL -64(R11), R11 ADDQ $0x05, AX - CMPL R12, $0x04 + CMPL R11, $0x04 JL four_bytes_remain_match_nolit_encodeBetterBlockAsm // emitRepeat emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL R12, $0x0100ffff + CMPL R11, $0x0100ffff JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy - LEAL -16842747(R12), R12 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) 
- MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm four_bytes_remain_match_nolit_encodeBetterBlockAsm: - TESTL R12, R12 + TESTL R11, R11 JZ match_nolit_emitcopy_end_encodeBetterBlockAsm - MOVB $0x03, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVL R8, 1(AX) + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm two_byte_offset_match_nolit_encodeBetterBlockAsm: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JAE long_offset_short_match_nolit_encodeBetterBlockAsm - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(AX) - MOVL R8, R9 - SHRL $0x08, R9 - SHLL $0x05, R9 - ORL R9, SI - MOVB SI, (AX) + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + MOVL DI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, BX + MOVB BL, (AX) ADDQ $0x02, AX - SUBL $0x08, R12 + SUBL $0x08, R11 // emitRepeat - LEAL -4(R12), R12 + LEAL -4(R11), R11 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL R12, $0x0100ffff + CMPL R11, $0x0100ffff JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - LEAL -16842747(R12), R12 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL 
-65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm long_offset_short_match_nolit_encodeBetterBlockAsm: MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX // emitRepeat emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL R12, $0x0100ffff + CMPL R11, $0x0100ffff JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short - LEAL -16842747(R12), R12 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - JMP two_byte_offset_match_nolit_encodeBetterBlockAsm two_byte_offset_short_match_nolit_encodeBetterBlockAsm: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeBetterBlockAsm - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeBetterBlockAsm - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm emit_copy_three_match_nolit_encodeBetterBlockAsm: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm match_is_repeat_encodeBetterBlockAsm: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_repeat_encodeBetterBlockAsm - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm - CMPL SI, $0x01000000 + CMPL BX, $0x01000000 JLT four_bytes_match_emit_repeat_encodeBetterBlockAsm MOVB $0xfc, (AX) - MOVL SI, 1(AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm four_bytes_match_emit_repeat_encodeBetterBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm three_bytes_match_emit_repeat_encodeBetterBlockAsm: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm two_bytes_match_emit_repeat_encodeBetterBlockAsm: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_repeat_encodeBetterBlockAsm JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm one_byte_match_emit_repeat_encodeBetterBlockAsm: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_repeat_encodeBetterBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 JMP 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm memmove_long_match_emit_repeat_encodeBetterBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX 
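[Editor's note] The emit_lit_memmove_* ladders above dispatch on copy length (4, 4-7, 8-16, 17-32, or 33-64 bytes) and cover the in-between sizes with a pair of possibly overlapping loads from the start and the end of the span, so no byte loop is needed. A sketch of the 4-7 and 8-16 cases in Go; the helper names are mine, and non-aliasing dst/src buffers are assumed, as on the literal-emit path:

package main

import (
	"encoding/binary"
	"fmt"
)

// move4through7 copies n bytes (4 <= n <= 7) with two overlapping
// 4-byte loads and stores, as in *_memmove_move_4through7.
func move4through7(dst, src []byte, n int) {
	head := binary.LittleEndian.Uint32(src)
	tail := binary.LittleEndian.Uint32(src[n-4:])
	binary.LittleEndian.PutUint32(dst, head)
	binary.LittleEndian.PutUint32(dst[n-4:], tail)
}

// move8through16 is the same trick with 8-byte words,
// as in *_memmove_move_8through16.
func move8through16(dst, src []byte, n int) {
	head := binary.LittleEndian.Uint64(src)
	tail := binary.LittleEndian.Uint64(src[n-8:])
	binary.LittleEndian.PutUint64(dst, head)
	binary.LittleEndian.PutUint64(dst[n-8:], tail)
}

func main() {
	dst := make([]byte, 6)
	move4through7(dst, []byte("abcdef"), 6)
	fmt.Println(string(dst)) // abcdef
}

The 17-32 and 33-64 cases in the assembly apply the identical head/tail overlap with 16-byte MOVOU registers instead of scalar words.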
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitRepeat emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm - CMPL R12, $0x0100ffff + CMPL R11, $0x0100ffff JLT repeat_five_match_nolit_repeat_encodeBetterBlockAsm - LEAL -16842747(R12), R12 - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) MOVB $0xff, 4(AX) ADDQ $0x05, AX JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm repeat_five_match_nolit_repeat_encodeBetterBlockAsm: - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_four_match_nolit_repeat_encodeBetterBlockAsm: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_three_match_nolit_repeat_encodeBetterBlockAsm: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_match_nolit_repeat_encodeBetterBlockAsm: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX match_nolit_emitcopy_end_encodeBetterBlockAsm: @@ -6588,54 +6554,51 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm: RET match_nolit_dst_ok_encodeBetterBlockAsm: - MOVQ $0x00cf1bbcdcbfa563, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x32, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - 
MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm: + CMPQ SI, R8 + JAE search_loop_encodeBetterBlockAsm + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x08, DI + IMULQ BX, DI + SHRQ $0x2f, DI + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm emit_remainder_encodeBetterBlockAsm: MOVQ src_len+32(FP), CX @@ -6815,9 +6778,9 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm: // func encodeBetterBlockAsm4MB(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm4MB(SB), $327704-56 +TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -6836,8 +6799,8 @@ zero_loop_encodeBetterBlockAsm4MB: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -6(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -6847,746 +6810,752 @@ zero_loop_encodeBetterBlockAsm4MB: MOVQ src_base+24(FP), DX search_loop_encodeBetterBlockAsm4MB: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x07, SI - CMPL SI, $0x63 + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 JLE check_maxskip_ok_encodeBetterBlockAsm4MB - LEAL 100(CX), SI + LEAL 100(CX), BX JMP check_maxskip_cont_encodeBetterBlockAsm4MB check_maxskip_ok_encodeBetterBlockAsm4MB: - LEAL 1(CX)(SI*1), SI + LEAL 1(CX)(BX*1), BX check_maxskip_cont_encodeBetterBlockAsm4MB: - CMPL SI, 8(SP) + CMPL BX, 8(SP) JGE emit_remainder_encodeBetterBlockAsm4MB - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x00cf1bbcdcbfa563, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeBetterBlockAsm4MB - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm4MB - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm4MB + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm4MB + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm4MB + +no_short_found_encodeBetterBlockAsm4MB: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB candidateS_match_encodeBetterBlockAsm4MB: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - MOVL 24(SP)(R10*4), SI + SHRQ 
$0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBetterBlockAsm4MB DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBetterBlockAsm4MB: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBetterBlockAsm4MB match_extend_back_loop_encodeBetterBlockAsm4MB: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBetterBlockAsm4MB - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBetterBlockAsm4MB LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBetterBlockAsm4MB JMP match_extend_back_loop_encodeBetterBlockAsm4MB match_extend_back_end_encodeBetterBlockAsm4MB: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 4(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 4(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBetterBlockAsm4MB MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBetterBlockAsm4MB: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeBetterBlockAsm4MB matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeBetterBlockAsm4MB matchlen_loop_match_nolit_encodeBetterBlockAsm4MB: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB JZ match_nolit_end_encodeBetterBlockAsm4MB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeBetterBlockAsm4MB - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeBetterBlockAsm4MB - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeBetterBlockAsm4MB - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeBetterBlockAsm4MB - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeBetterBlockAsm4MB: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - CMPL 16(SP), R8 + CMPL 16(SP), DI JEQ match_is_repeat_encodeBetterBlockAsm4MB - CMPL R12, $0x01 + CMPL R11, $0x01 JG 
match_length_ok_encodeBetterBlockAsm4MB - CMPL R8, $0x0000ffff + CMPL DI, $0x0000ffff JLE match_length_ok_encodeBetterBlockAsm4MB MOVL 20(SP), CX INCL CX JMP search_loop_encodeBetterBlockAsm4MB match_length_ok_encodeBetterBlockAsm4MB: - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeBetterBlockAsm4MB - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeBetterBlockAsm4MB - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_match_emit_encodeBetterBlockAsm4MB - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_encodeBetterBlockAsm4MB three_bytes_match_emit_encodeBetterBlockAsm4MB: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBetterBlockAsm4MB two_bytes_match_emit_encodeBetterBlockAsm4MB: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeBetterBlockAsm4MB JMP memmove_long_match_emit_encodeBetterBlockAsm4MB one_byte_match_emit_encodeBetterBlockAsm4MB: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBetterBlockAsm4MB: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU 
-16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB memmove_long_match_emit_encodeBetterBlockAsm4MB: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeBetterBlockAsm4MB: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy - CMPL R8, $0x00010000 + CMPL DI, $0x00010000 JL two_byte_offset_match_nolit_encodeBetterBlockAsm4MB - -four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB MOVB $0xff, (AX) - MOVL R8, 1(AX) - LEAL -64(R12), R12 + MOVL DI, 1(AX) + LEAL -64(R11), R11 ADDQ $0x05, AX - CMPL R12, $0x04 + CMPL R11, $0x04 JL four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 
4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: - TESTL R12, R12 + TESTL R11, R11 JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - MOVB $0x03, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVL R8, 1(AX) + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (AX) + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX - SUBL $0x08, R12 + SUBL $0x08, R11 // emitRepeat - LEAL -4(R12), R12 + LEAL -4(R11), R11 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - LEAL -4(R12), 
R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB long_offset_short_match_nolit_encodeBetterBlockAsm4MB: MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - JMP two_byte_offset_match_nolit_encodeBetterBlockAsm4MB two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL 
DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB match_is_repeat_encodeBetterBlockAsm4MB: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_repeat_encodeBetterBlockAsm4MB - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_repeat_encodeBetterBlockAsm4MB JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_repeat_encodeBetterBlockAsm4MB: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB - CMPL R12, $0x00010100 + CMPL R11, $0x00010100 JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB - LEAL -65536(R12), R12 - MOVL R12, R8 + LEAL -65536(R11), R11 + MOVL R11, DI MOVW $0x001d, (AX) - MOVW R12, 2(AX) - SARL $0x10, R8 - MOVB R8, 4(AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB 
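[Editor's note] The substantive change in this patch's match_nolit_dst_ok_* hunks appears just below for the 4MB variant, as it did above for the base one: the fixed reindexing of a few positions around the match is replaced by an index_loop that rehashes every other position of the skipped span, walking inward from both ends two positions per iteration, and the long hash widens from 16 to 17 bits (SHRQ $0x30 becomes SHRQ $0x2f, the short table moves from 262168(SP) to 524312(SP), and the frame grows from $327704 to $589848). A hedged Go reconstruction of the two hash functions and the loop, with all names my own; the shift and multiply constants are read directly off the generated code:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	prime7bytes = 0x00cf1bbcdcbfa563 // IMULQ constant of the long (7-byte) hash
	prime4bytes = 0x9e3779b1         // IMULQ constant of the short (4-byte) hash
)

// hash7 mirrors SHLQ $0x08 / IMULQ prime7bytes / SHRQ $(64-bits):
// hash the low seven bytes of u into a bits-wide table index.
func hash7(u uint64, bits uint) uint32 {
	return uint32(((u << 8) * prime7bytes) >> (64 - bits))
}

// hash4 mirrors SHLQ $0x20 / IMULQ prime4bytes / SHRQ $(64-bits).
func hash4(u uint64, bits uint) uint32 {
	return uint32(((u << 32) * prime4bytes) >> (64 - bits))
}

// indexSpan mirrors index_loop_*: record every other position of the
// span [s, e] in the long table, meeting in the middle. It assumes at
// least eight readable bytes at s and e, which the encoder guarantees.
func indexSpan(src []byte, table []uint32, s, e int, bits uint) {
	for s < e {
		table[hash7(binary.LittleEndian.Uint64(src[s:]), bits)] = uint32(s)
		table[hash7(binary.LittleEndian.Uint64(src[e:]), bits)] = uint32(e)
		s += 2
		e -= 2
	}
}

func main() {
	table := make([]uint32, 1<<17) // the doubled long table: 2^17 entries
	src := []byte("0123456789abcdef0123456789abcdef")
	indexSpan(src, table, 1, 20, 17)
	fmt.Println(table[hash7(binary.LittleEndian.Uint64(src[1:]), 17)]) // 1
}

Indexing half the skipped positions from both ends costs a fraction of hashing every position while still seeding the table across the whole match, which is presumably why the generated code takes this shape.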
repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: @@ -7598,54 +7567,51 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: RET match_nolit_dst_ok_encodeBetterBlockAsm4MB: - MOVQ $0x00cf1bbcdcbfa563, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x32, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm4MB + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm4MB: + CMPQ SI, R8 + JAE search_loop_encodeBetterBlockAsm4MB + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x08, DI + IMULQ BX, DI + SHRQ $0x2f, DI + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm4MB emit_remainder_encodeBetterBlockAsm4MB: MOVQ src_len+32(FP), CX @@ -7838,8 +7804,8 @@ zero_loop_encodeBetterBlockAsm12B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -6(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -7849,591 +7815,599 @@ zero_loop_encodeBetterBlockAsm12B: MOVQ src_base+24(FP), DX search_loop_encodeBetterBlockAsm12B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 
1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBetterBlockAsm12B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x34, R11 - MOVL 24(SP)(R10*4), SI - MOVL 65560(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 65560(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 65560(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 65560(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeBetterBlockAsm12B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm12B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm12B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm12B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm12B + +no_short_found_encodeBetterBlockAsm12B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B candidateS_match_encodeBetterBlockAsm12B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBetterBlockAsm12B DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBetterBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBetterBlockAsm12B match_extend_back_loop_encodeBetterBlockAsm12B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBetterBlockAsm12B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBetterBlockAsm12B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBetterBlockAsm12B JMP match_extend_back_loop_encodeBetterBlockAsm12B match_extend_back_end_encodeBetterBlockAsm12B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBetterBlockAsm12B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBetterBlockAsm12B: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeBetterBlockAsm12B matchlen_loopback_match_nolit_encodeBetterBlockAsm12B: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeBetterBlockAsm12B matchlen_loop_match_nolit_encodeBetterBlockAsm12B: - 
LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B JZ match_nolit_end_encodeBetterBlockAsm12B matchlen_match4_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeBetterBlockAsm12B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeBetterBlockAsm12B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeBetterBlockAsm12B - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeBetterBlockAsm12B - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeBetterBlockAsm12B: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - CMPL 16(SP), R8 + CMPL 16(SP), DI JEQ match_is_repeat_encodeBetterBlockAsm12B - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeBetterBlockAsm12B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeBetterBlockAsm12B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBetterBlockAsm12B two_bytes_match_emit_encodeBetterBlockAsm12B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeBetterBlockAsm12B JMP memmove_long_match_emit_encodeBetterBlockAsm12B one_byte_match_emit_encodeBetterBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBetterBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP 
memmove_end_copy_match_emit_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBetterBlockAsm12B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B memmove_long_match_emit_encodeBetterBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeBetterBlockAsm12B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy -two_byte_offset_match_nolit_encodeBetterBlockAsm12B: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (AX) + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB 
DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX - SUBL $0x08, R12 + SUBL $0x08, R11 // emitRepeat - LEAL -4(R12), R12 + LEAL -4(R11), R11 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B long_offset_short_match_nolit_encodeBetterBlockAsm12B: MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL 
$0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - JMP two_byte_offset_match_nolit_encodeBetterBlockAsm12B two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B emit_copy_three_match_nolit_encodeBetterBlockAsm12B: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B match_is_repeat_encodeBetterBlockAsm12B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_repeat_encodeBetterBlockAsm12B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm12B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_repeat_encodeBetterBlockAsm12B JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B one_byte_match_emit_repeat_encodeBetterBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_repeat_encodeBetterBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX match_nolit_emitcopy_end_encodeBetterBlockAsm12B: @@ -8445,54 +8419,51 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm12B: RET match_nolit_dst_ok_encodeBetterBlockAsm12B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x32, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x34, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 65560(SP)(R11*4) - MOVL R15, 65560(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 65560(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm12B + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 65560(SP)(R10*4) + MOVL R13, 65560(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm12B: + CMPQ SI, R8 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x10, DI + IMULQ BX, DI + SHRQ $0x32, DI + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm12B emit_remainder_encodeBetterBlockAsm12B: MOVQ src_len+32(FP), CX @@ -8674,8 +8645,8 @@ zero_loop_encodeBetterBlockAsm10B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -6(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -8685,591 +8656,599 @@ zero_loop_encodeBetterBlockAsm10B: MOVQ src_base+24(FP), DX search_loop_encodeBetterBlockAsm10B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBetterBlockAsm10B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ 
DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x36, R11 - MOVL 24(SP)(R10*4), SI - MOVL 16408(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 16408(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 16408(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 16408(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeBetterBlockAsm10B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm10B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm10B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B candidateS_match_encodeBetterBlockAsm10B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBetterBlockAsm10B DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBetterBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBetterBlockAsm10B match_extend_back_loop_encodeBetterBlockAsm10B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBetterBlockAsm10B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBetterBlockAsm10B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBetterBlockAsm10B JMP match_extend_back_loop_encodeBetterBlockAsm10B match_extend_back_end_encodeBetterBlockAsm10B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBetterBlockAsm10B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBetterBlockAsm10B: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeBetterBlockAsm10B matchlen_loopback_match_nolit_encodeBetterBlockAsm10B: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeBetterBlockAsm10B matchlen_loop_match_nolit_encodeBetterBlockAsm10B: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B JZ 
match_nolit_end_encodeBetterBlockAsm10B matchlen_match4_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeBetterBlockAsm10B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B - SUBL $0x04, R8 - LEAL 4(R12), R12 - + SUBL $0x04, DI + LEAL 4(R11), R11 + matchlen_match2_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeBetterBlockAsm10B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeBetterBlockAsm10B - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeBetterBlockAsm10B - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeBetterBlockAsm10B: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - CMPL 16(SP), R8 + CMPL 16(SP), DI JEQ match_is_repeat_encodeBetterBlockAsm10B - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeBetterBlockAsm10B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeBetterBlockAsm10B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBetterBlockAsm10B two_bytes_match_emit_encodeBetterBlockAsm10B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeBetterBlockAsm10B JMP memmove_long_match_emit_encodeBetterBlockAsm10B one_byte_match_emit_encodeBetterBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBetterBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, 
-8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBetterBlockAsm10B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B memmove_long_match_emit_encodeBetterBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeBetterBlockAsm10B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy -two_byte_offset_match_nolit_encodeBetterBlockAsm10B: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (AX) + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX - SUBL $0x08, R12 + SUBL $0x08, R11 // emitRepeat - LEAL -4(R12), R12 + LEAL -4(R11), R11 JMP 
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B long_offset_short_match_nolit_encodeBetterBlockAsm10B: MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - JMP 
two_byte_offset_match_nolit_encodeBetterBlockAsm10B two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B emit_copy_three_match_nolit_encodeBetterBlockAsm10B: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B match_is_repeat_encodeBetterBlockAsm10B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_repeat_encodeBetterBlockAsm10B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm10B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_repeat_encodeBetterBlockAsm10B JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B one_byte_match_emit_repeat_encodeBetterBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_repeat_encodeBetterBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - 
MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - 
MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX match_nolit_emitcopy_end_encodeBetterBlockAsm10B: @@ -9281,54 +9260,51 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm10B: RET match_nolit_dst_ok_encodeBetterBlockAsm10B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x20, R12 - IMULQ R8, R12 - SHRQ $0x36, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 16408(SP)(R11*4) - MOVL R15, 16408(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 16408(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm10B + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x36, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 16408(SP)(R10*4) + MOVL R13, 16408(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm10B: + CMPQ SI, R8 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x10, DI + IMULQ BX, DI + SHRQ $0x34, DI + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm10B emit_remainder_encodeBetterBlockAsm10B: MOVQ src_len+32(FP), CX @@ -9510,8 +9486,8 @@ zero_loop_encodeBetterBlockAsm8B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -6(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -9521,577 +9497,585 @@ zero_loop_encodeBetterBlockAsm8B: MOVQ src_base+24(FP), DX search_loop_encodeBetterBlockAsm8B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeBetterBlockAsm8B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x38, R11 - 
MOVL 24(SP)(R10*4), SI - MOVL 4120(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 4120(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 4120(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 4120(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeBetterBlockAsm8B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeBetterBlockAsm8B - MOVL 20(SP), CX - JMP search_loop_encodeBetterBlockAsm8B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B candidateS_match_encodeBetterBlockAsm8B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeBetterBlockAsm8B DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeBetterBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeBetterBlockAsm8B match_extend_back_loop_encodeBetterBlockAsm8B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeBetterBlockAsm8B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeBetterBlockAsm8B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeBetterBlockAsm8B JMP match_extend_back_loop_encodeBetterBlockAsm8B match_extend_back_end_encodeBetterBlockAsm8B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeBetterBlockAsm8B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeBetterBlockAsm8B: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeBetterBlockAsm8B matchlen_loopback_match_nolit_encodeBetterBlockAsm8B: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeBetterBlockAsm8B matchlen_loop_match_nolit_encodeBetterBlockAsm8B: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B JZ match_nolit_end_encodeBetterBlockAsm8B matchlen_match4_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL 
matchlen_match2_match_nolit_encodeBetterBlockAsm8B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeBetterBlockAsm8B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeBetterBlockAsm8B - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeBetterBlockAsm8B - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeBetterBlockAsm8B: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - CMPL 16(SP), R8 + CMPL 16(SP), DI JEQ match_is_repeat_encodeBetterBlockAsm8B - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeBetterBlockAsm8B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeBetterBlockAsm8B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeBetterBlockAsm8B two_bytes_match_emit_encodeBetterBlockAsm8B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeBetterBlockAsm8B JMP memmove_long_match_emit_encodeBetterBlockAsm8B one_byte_match_emit_encodeBetterBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeBetterBlockAsm8B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x04 + CMPQ R8, $0x04 JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 - CMPQ R9, $0x08 + CMPQ R8, $0x08 JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (AX) + MOVL (R9), R10 + MOVL R10, (AX) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (AX) - MOVL R10, -4(AX)(R9*1) + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B 
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeBetterBlockAsm8B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B memmove_long_match_emit_encodeBetterBlockAsm8B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeBetterBlockAsm8B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy -two_byte_offset_match_nolit_encodeBetterBlockAsm8B: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (AX) + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX - SUBL $0x08, R12 + SUBL $0x08, R11 // emitRepeat - LEAL -4(R12), R12 + LEAL -4(R11), R11 JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE 
repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B long_offset_short_match_nolit_encodeBetterBlockAsm8B: MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - JMP two_byte_offset_match_nolit_encodeBetterBlockAsm8B two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeBetterBlockAsm8B - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B emit_copy_three_match_nolit_encodeBetterBlockAsm8B: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW 
R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B match_is_repeat_encodeBetterBlockAsm8B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c JLT one_byte_match_emit_repeat_encodeBetterBlockAsm8B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm8B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_repeat_encodeBetterBlockAsm8B JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B one_byte_match_emit_repeat_encodeBetterBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_repeat_encodeBetterBlockAsm8B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveShort - CMPQ R8, $0x04 + CMPQ DI, $0x04 JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 - CMPQ R8, $0x08 + CMPQ DI, $0x08 JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 - CMPQ R8, $0x10 + CMPQ DI, $0x10 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 - CMPQ R8, $0x20 + CMPQ DI, $0x20 JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: - MOVL (R9), R10 - MOVL R10, (AX) + MOVL (R8), R9 + MOVL R9, (AX) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: - MOVL (R9), R10 - MOVL -4(R9)(R8*1), R9 - MOVL R10, (AX) - MOVL R9, -4(AX)(R8*1) + MOVL (R8), R9 + MOVL -4(R8)(DI*1), R8 + MOVL R9, (AX) + MOVL R8, -4(AX)(DI*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (AX) - MOVQ R9, -8(AX)(R8*1) + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) + MOVOU X1, -16(AX)(DI*1) JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B 
memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R13 - SUBQ R10, R13 - DECQ R11 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R12 + SUBQ R9, R12 + DECQ R10 JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R13*1), R10 - LEAQ -32(AX)(R13*1), R14 + LEAQ -32(R8)(R12*1), R9 + LEAQ -32(AX)(R12*1), R13 emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) ADDQ $0x20, R13 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R12 + DECQ R10 JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R13*1), X4 - MOVOU -16(R9)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R8, R13 + MOVOU -32(R8)(R12*1), X4 + MOVOU -16(R8)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ DI, R12 JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B - CMPL SI, $0x0c + CMPL BX, $0x0c JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: - CMPL R12, $0x00000104 + CMPL R11, $0x00000104 JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B - LEAL -256(R12), R12 + LEAL -256(R11), R11 MOVW $0x0019, (AX) - MOVW R12, 2(AX) + MOVW R11, 2(AX) ADDQ $0x04, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: - LEAL -4(R12), R12 + LEAL -4(R11), R11 MOVW $0x0015, (AX) - MOVB R12, 2(AX) + MOVB R11, 2(AX) ADDQ $0x03, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (AX) + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(AX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) ADDQ $0x02, AX match_nolit_emitcopy_end_encodeBetterBlockAsm8B: @@ -10103,54 +10087,51 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm8B: RET match_nolit_dst_ok_encodeBetterBlockAsm8B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - 
SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x38, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x36, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x38, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 4120(SP)(R11*4) - MOVL R15, 4120(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 4120(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeBetterBlockAsm8B + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 4120(SP)(R10*4) + MOVL R13, 4120(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm8B: + CMPQ SI, R8 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x10, DI + IMULQ BX, DI + SHRQ $0x36, DI + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm8B emit_remainder_encodeBetterBlockAsm8B: MOVQ src_len+32(FP), CX @@ -10332,8 +10313,8 @@ zero_loop_encodeSnappyBlockAsm: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -10343,321 +10324,321 @@ zero_loop_encodeSnappyBlockAsm: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBlockAsm: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 SHLQ $0x10, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x32, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm - LEAL 1(CX), DI - MOVL 12(SP), SI - MOVL DI, R8 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI JZ 
repeat_extend_back_end_encodeSnappyBlockAsm repeat_extend_back_loop_encodeSnappyBlockAsm: - CMPL DI, SI + CMPL SI, BX JLE repeat_extend_back_end_encodeSnappyBlockAsm - MOVB -1(DX)(R8*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeSnappyBlockAsm - LEAL -1(DI), DI - DECL R8 + LEAL -1(SI), SI + DECL DI JNZ repeat_extend_back_loop_encodeSnappyBlockAsm repeat_extend_back_end_encodeSnappyBlockAsm: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeSnappyBlockAsm - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeSnappyBlockAsm - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_repeat_emit_encodeSnappyBlockAsm - CMPL SI, $0x01000000 + CMPL BX, $0x01000000 JLT four_bytes_repeat_emit_encodeSnappyBlockAsm MOVB $0xfc, (AX) - MOVL SI, 1(AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP memmove_long_repeat_emit_encodeSnappyBlockAsm four_bytes_repeat_emit_encodeSnappyBlockAsm: - MOVL SI, R10 - SHRL $0x10, R10 + MOVL BX, R9 + SHRL $0x10, R9 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R10, 3(AX) + MOVW BX, 1(AX) + MOVB R9, 3(AX) ADDQ $0x04, AX JMP memmove_long_repeat_emit_encodeSnappyBlockAsm three_bytes_repeat_emit_encodeSnappyBlockAsm: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeSnappyBlockAsm two_bytes_repeat_emit_encodeSnappyBlockAsm: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeSnappyBlockAsm JMP memmove_long_repeat_emit_encodeSnappyBlockAsm one_byte_repeat_emit_encodeSnappyBlockAsm: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeSnappyBlockAsm: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveShort - CMPQ R8, $0x08 + CMPQ DI, $0x08 JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 - CMPQ R8, $0x10 + CMPQ DI, $0x10 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 - CMPQ R8, $0x20 + CMPQ DI, $0x20 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (AX) + MOVQ (R8), R9 + MOVQ R9, (AX) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (AX) - MOVQ R9, -8(AX)(R8*1) + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) + MOVOU X1, -16(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU 
-16(R8)(DI*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm memmove_long_repeat_emit_encodeSnappyBlockAsm: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeSnappyBlockAsm: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX // matchLen - XORL R11, R11 - CMPL R8, $0x08 + XORL R10, R10 + CMPL DI, $0x08 JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm matchlen_loopback_repeat_extend_encodeSnappyBlockAsm: - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - TESTQ R10, R10 + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm #ifdef GOAMD64_v3 - TZCNTQ R10, R10 + TZCNTQ R9, R9 #else - BSFQ R10, R10 + BSFQ R9, R9 #endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 JMP repeat_extend_forward_end_encodeSnappyBlockAsm matchlen_loop_repeat_extend_encodeSnappyBlockAsm: - LEAL -8(R8), R8 - LEAL 8(R11), R11 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm JZ repeat_extend_forward_end_encodeSnappyBlockAsm matchlen_match4_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm - SUBL $0x04, R8 - LEAL 4(R11), R11 + SUBL $0x04, DI + LEAL 4(R10), R10 
matchlen_match2_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm - SUBL $0x02, R8 - LEAL 2(R11), R11 + SUBL $0x02, DI + LEAL 2(R10), R10 matchlen_match1_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x01 + CMPL DI, $0x01 JL repeat_extend_forward_end_encodeSnappyBlockAsm - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 JNE repeat_extend_forward_end_encodeSnappyBlockAsm - LEAL 1(R11), R11 + LEAL 1(R10), R10 repeat_extend_forward_end_encodeSnappyBlockAsm: - ADDL R11, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI // emitCopy - CMPL DI, $0x00010000 + CMPL SI, $0x00010000 JL two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm MOVB $0xff, (AX) - MOVL DI, 1(AX) - LEAL -64(SI), SI + MOVL SI, 1(AX) + LEAL -64(BX), BX ADDQ $0x05, AX - CMPL SI, $0x04 + CMPL BX, $0x04 JL four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: - TESTL SI, SI + TESTL BX, BX JZ repeat_end_emit_encodeSnappyBlockAsm - MOVB $0x03, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVL DI, 1(AX) + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) ADDQ $0x05, AX JMP repeat_end_emit_encodeSnappyBlockAsm two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeSnappyBlockAsm emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeSnappyBlockAsm: @@ -10665,16 +10646,16 @@ repeat_end_emit_encodeSnappyBlockAsm: JMP search_loop_encodeSnappyBlockAsm no_repeat_found_encodeSnappyBlockAsm: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBlockAsm - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeSnappyBlockAsm - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeSnappyBlockAsm MOVL 20(SP), CX JMP search_loop_encodeSnappyBlockAsm @@ -10684,331 +10665,331 @@ candidate3_match_encodeSnappyBlockAsm: JMP candidate_match_encodeSnappyBlockAsm 
candidate2_match_encodeSnappyBlockAsm: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBlockAsm match_extend_back_loop_encodeSnappyBlockAsm: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBlockAsm - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBlockAsm LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBlockAsm JMP match_extend_back_loop_encodeSnappyBlockAsm match_extend_back_end_encodeSnappyBlockAsm: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 5(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBlockAsm MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBlockAsm: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeSnappyBlockAsm - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBlockAsm - CMPL R8, $0x00010000 + CMPL DI, $0x00010000 JLT three_bytes_match_emit_encodeSnappyBlockAsm - CMPL R8, $0x01000000 + CMPL DI, $0x01000000 JLT four_bytes_match_emit_encodeSnappyBlockAsm MOVB $0xfc, (AX) - MOVL R8, 1(AX) + MOVL DI, 1(AX) ADDQ $0x05, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm four_bytes_match_emit_encodeSnappyBlockAsm: - MOVL R8, R10 - SHRL $0x10, R10 + MOVL DI, R9 + SHRL $0x10, R9 MOVB $0xf8, (AX) - MOVW R8, 1(AX) - MOVB R10, 3(AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm three_bytes_match_emit_encodeSnappyBlockAsm: MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm two_bytes_match_emit_encodeSnappyBlockAsm: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeSnappyBlockAsm JMP memmove_long_match_emit_encodeSnappyBlockAsm one_byte_match_emit_encodeSnappyBlockAsm: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBlockAsm: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBlockAsm: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeSnappyBlockAsm memmove_long_match_emit_encodeSnappyBlockAsm: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeSnappyBlockAsm: match_nolit_loop_encodeSnappyBlockAsm: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBlockAsm matchlen_loopback_match_nolit_encodeSnappyBlockAsm: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeSnappyBlockAsm matchlen_loop_match_nolit_encodeSnappyBlockAsm: - LEAL 
-8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm JZ match_nolit_end_encodeSnappyBlockAsm matchlen_match4_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBlockAsm - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBlockAsm - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeSnappyBlockAsm - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeSnappyBlockAsm - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeSnappyBlockAsm: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JL two_byte_offset_match_nolit_encodeSnappyBlockAsm four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE four_bytes_remain_match_nolit_encodeSnappyBlockAsm MOVB $0xff, (AX) - MOVL SI, 1(AX) - LEAL -64(R10), R10 + MOVL BX, 1(AX) + LEAL -64(R9), R9 ADDQ $0x05, AX - CMPL R10, $0x04 + CMPL R9, $0x04 JL four_bytes_remain_match_nolit_encodeSnappyBlockAsm JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm four_bytes_remain_match_nolit_encodeSnappyBlockAsm: - TESTL R10, R10 + TESTL R9, R9 JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm - MOVB $0x03, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVL SI, 1(AX) + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm two_byte_offset_match_nolit_encodeSnappyBlockAsm: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm emit_copy_three_match_nolit_encodeSnappyBlockAsm: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBlockAsm: CMPL CX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeSnappyBlockAsm MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeSnappyBlockAsm: - MOVQ 
$0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeSnappyBlockAsm INCL CX JMP search_loop_encodeSnappyBlockAsm @@ -11212,8 +11193,8 @@ zero_loop_encodeSnappyBlockAsm64K: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -11223,278 +11204,278 @@ zero_loop_encodeSnappyBlockAsm64K: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBlockAsm64K: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm64K - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 SHLQ $0x10, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x32, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm64K - LEAL 1(CX), DI - MOVL 12(SP), SI - MOVL DI, R8 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI JZ repeat_extend_back_end_encodeSnappyBlockAsm64K repeat_extend_back_loop_encodeSnappyBlockAsm64K: - CMPL DI, SI + CMPL SI, BX JLE repeat_extend_back_end_encodeSnappyBlockAsm64K - MOVB -1(DX)(R8*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeSnappyBlockAsm64K - LEAL -1(DI), DI - DECL R8 + LEAL -1(SI), SI + DECL DI JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K repeat_extend_back_end_encodeSnappyBlockAsm64K: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeSnappyBlockAsm64K - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeSnappyBlockAsm64K MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP 
memmove_long_repeat_emit_encodeSnappyBlockAsm64K two_bytes_repeat_emit_encodeSnappyBlockAsm64K: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeSnappyBlockAsm64K JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K one_byte_repeat_emit_encodeSnappyBlockAsm64K: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeSnappyBlockAsm64K: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveShort - CMPQ R8, $0x08 + CMPQ DI, $0x08 JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 - CMPQ R8, $0x10 + CMPQ DI, $0x10 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 - CMPQ R8, $0x20 + CMPQ DI, $0x20 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (AX) + MOVQ (R8), R9 + MOVQ R9, (AX) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (AX) - MOVQ R9, -8(AX)(R8*1) + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) + MOVOU X1, -16(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K memmove_long_repeat_emit_encodeSnappyBlockAsm64K: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA X4, 
-32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX // matchLen - XORL R11, R11 - CMPL R8, $0x08 + XORL R10, R10 + CMPL DI, $0x08 JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K: - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - TESTQ R10, R10 + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K #ifdef GOAMD64_v3 - TZCNTQ R10, R10 + TZCNTQ R9, R9 #else - BSFQ R10, R10 + BSFQ R9, R9 #endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K: - LEAL -8(R8), R8 - LEAL 8(R11), R11 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K - SUBL $0x04, R8 - LEAL 4(R11), R11 + SUBL $0x04, DI + LEAL 4(R10), R10 matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K - SUBL $0x02, R8 - LEAL 2(R11), R11 + SUBL $0x02, DI + LEAL 2(R10), R10 matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x01 + CMPL DI, $0x01 JL repeat_extend_forward_end_encodeSnappyBlockAsm64K - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K - LEAL 1(R11), R11 + LEAL 1(R10), R10 repeat_extend_forward_end_encodeSnappyBlockAsm64K: - ADDL R11, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI // emitCopy two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - 
MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeSnappyBlockAsm64K emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeSnappyBlockAsm64K: @@ -11502,16 +11483,16 @@ repeat_end_emit_encodeSnappyBlockAsm64K: JMP search_loop_encodeSnappyBlockAsm64K no_repeat_found_encodeSnappyBlockAsm64K: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBlockAsm64K - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeSnappyBlockAsm64K - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeSnappyBlockAsm64K MOVL 20(SP), CX JMP search_loop_encodeSnappyBlockAsm64K @@ -11521,288 +11502,288 @@ candidate3_match_encodeSnappyBlockAsm64K: JMP candidate_match_encodeSnappyBlockAsm64K candidate2_match_encodeSnappyBlockAsm64K: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBlockAsm64K: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBlockAsm64K match_extend_back_loop_encodeSnappyBlockAsm64K: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBlockAsm64K - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBlockAsm64K LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBlockAsm64K JMP match_extend_back_loop_encodeSnappyBlockAsm64K match_extend_back_end_encodeSnappyBlockAsm64K: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBlockAsm64K MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBlockAsm64K: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeSnappyBlockAsm64K - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBlockAsm64K MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm64K two_bytes_match_emit_encodeSnappyBlockAsm64K: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeSnappyBlockAsm64K JMP memmove_long_match_emit_encodeSnappyBlockAsm64K one_byte_match_emit_encodeSnappyBlockAsm64K: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBlockAsm64K: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K memmove_long_match_emit_encodeSnappyBlockAsm64K: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeSnappyBlockAsm64K: match_nolit_loop_encodeSnappyBlockAsm64K: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ 
src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBlockAsm64K matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeSnappyBlockAsm64K matchlen_loop_match_nolit_encodeSnappyBlockAsm64K: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K JZ match_nolit_end_encodeSnappyBlockAsm64K matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBlockAsm64K - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBlockAsm64K - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeSnappyBlockAsm64K - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeSnappyBlockAsm64K - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeSnappyBlockAsm64K: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: CMPL CX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm64K - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeSnappyBlockAsm64K MOVQ $0x00000000, ret+48(FP) RET 
match_nolit_dst_ok_encodeSnappyBlockAsm64K: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeSnappyBlockAsm64K INCL CX JMP search_loop_encodeSnappyBlockAsm64K @@ -11987,8 +11968,8 @@ zero_loop_encodeSnappyBlockAsm12B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -11998,278 +11979,278 @@ zero_loop_encodeSnappyBlockAsm12B: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBlockAsm12B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm12B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 SHLQ $0x18, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x34, R10 - SHLQ $0x18, R11 - IMULQ R9, R11 - SHRQ $0x34, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x18, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm12B - LEAL 1(CX), DI - MOVL 12(SP), SI - MOVL DI, R8 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL CX, R8 SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm12B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI JZ repeat_extend_back_end_encodeSnappyBlockAsm12B repeat_extend_back_loop_encodeSnappyBlockAsm12B: - CMPL DI, SI + CMPL SI, BX JLE repeat_extend_back_end_encodeSnappyBlockAsm12B - MOVB -1(DX)(R8*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeSnappyBlockAsm12B - LEAL -1(DI), DI - DECL R8 + LEAL -1(SI), SI + DECL DI JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B repeat_extend_back_end_encodeSnappyBlockAsm12B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeSnappyBlockAsm12B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeSnappyBlockAsm12B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 
1(AX) ADDQ $0x03, AX JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B two_bytes_repeat_emit_encodeSnappyBlockAsm12B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeSnappyBlockAsm12B JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B one_byte_repeat_emit_encodeSnappyBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeSnappyBlockAsm12B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveShort - CMPQ R8, $0x08 + CMPQ DI, $0x08 JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 - CMPQ R8, $0x10 + CMPQ DI, $0x10 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 - CMPQ R8, $0x20 + CMPQ DI, $0x20 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (AX) + MOVQ (R8), R9 + MOVQ R9, (AX) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (AX) - MOVQ R9, -8(AX)(R8*1) + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) + MOVOU X1, -16(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B memmove_long_repeat_emit_encodeSnappyBlockAsm12B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU 
-16(R9)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX // matchLen - XORL R11, R11 - CMPL R8, $0x08 + XORL R10, R10 + CMPL DI, $0x08 JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B: - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - TESTQ R10, R10 + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B #ifdef GOAMD64_v3 - TZCNTQ R10, R10 + TZCNTQ R9, R9 #else - BSFQ R10, R10 + BSFQ R9, R9 #endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B: - LEAL -8(R8), R8 - LEAL 8(R11), R11 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B - SUBL $0x04, R8 - LEAL 4(R11), R11 + SUBL $0x04, DI + LEAL 4(R10), R10 matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B - SUBL $0x02, R8 - LEAL 2(R11), R11 + SUBL $0x02, DI + LEAL 2(R10), R10 matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL repeat_extend_forward_end_encodeSnappyBlockAsm12B - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B - LEAL 1(R11), R11 + LEAL 1(R10), R10 repeat_extend_forward_end_encodeSnappyBlockAsm12B: - ADDL R11, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI // emitCopy two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B - MOVB 
$0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeSnappyBlockAsm12B emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeSnappyBlockAsm12B: @@ -12277,16 +12258,16 @@ repeat_end_emit_encodeSnappyBlockAsm12B: JMP search_loop_encodeSnappyBlockAsm12B no_repeat_found_encodeSnappyBlockAsm12B: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBlockAsm12B - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeSnappyBlockAsm12B - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeSnappyBlockAsm12B MOVL 20(SP), CX JMP search_loop_encodeSnappyBlockAsm12B @@ -12296,288 +12277,288 @@ candidate3_match_encodeSnappyBlockAsm12B: JMP candidate_match_encodeSnappyBlockAsm12B candidate2_match_encodeSnappyBlockAsm12B: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBlockAsm12B match_extend_back_loop_encodeSnappyBlockAsm12B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBlockAsm12B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBlockAsm12B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBlockAsm12B JMP match_extend_back_loop_encodeSnappyBlockAsm12B match_extend_back_end_encodeSnappyBlockAsm12B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBlockAsm12B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBlockAsm12B: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeSnappyBlockAsm12B - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBlockAsm12B MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm12B two_bytes_match_emit_encodeSnappyBlockAsm12B: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeSnappyBlockAsm12B JMP memmove_long_match_emit_encodeSnappyBlockAsm12B one_byte_match_emit_encodeSnappyBlockAsm12B: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBlockAsm12B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B memmove_long_match_emit_encodeSnappyBlockAsm12B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeSnappyBlockAsm12B: match_nolit_loop_encodeSnappyBlockAsm12B: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ 
src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBlockAsm12B matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeSnappyBlockAsm12B matchlen_loop_match_nolit_encodeSnappyBlockAsm12B: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B JZ match_nolit_end_encodeSnappyBlockAsm12B matchlen_match4_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBlockAsm12B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBlockAsm12B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeSnappyBlockAsm12B - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeSnappyBlockAsm12B - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeSnappyBlockAsm12B: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: CMPL CX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm12B - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeSnappyBlockAsm12B MOVQ $0x00000000, ret+48(FP) RET 
match_nolit_dst_ok_encodeSnappyBlockAsm12B: - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x18, R8 - IMULQ R9, R8 - SHRQ $0x34, R8 - SHLQ $0x18, SI - IMULQ R9, SI - SHRQ $0x34, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x18, DI + IMULQ R8, DI + SHRQ $0x34, DI + SHLQ $0x18, BX + IMULQ R8, BX + SHRQ $0x34, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeSnappyBlockAsm12B INCL CX JMP search_loop_encodeSnappyBlockAsm12B @@ -12762,8 +12743,8 @@ zero_loop_encodeSnappyBlockAsm10B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -12773,278 +12754,278 @@ zero_loop_encodeSnappyBlockAsm10B: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBlockAsm10B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm10B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x36, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 SHLQ $0x20, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x36, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm10B - LEAL 1(CX), DI - MOVL 12(SP), SI - MOVL DI, R8 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL CX, R8 SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm10B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI JZ repeat_extend_back_end_encodeSnappyBlockAsm10B repeat_extend_back_loop_encodeSnappyBlockAsm10B: - CMPL DI, SI + CMPL SI, BX JLE repeat_extend_back_end_encodeSnappyBlockAsm10B - MOVB -1(DX)(R8*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeSnappyBlockAsm10B - LEAL -1(DI), DI - DECL R8 + LEAL -1(SI), SI + DECL DI JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B repeat_extend_back_end_encodeSnappyBlockAsm10B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeSnappyBlockAsm10B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeSnappyBlockAsm10B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, 
AX JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B two_bytes_repeat_emit_encodeSnappyBlockAsm10B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeSnappyBlockAsm10B JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B one_byte_repeat_emit_encodeSnappyBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeSnappyBlockAsm10B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveShort - CMPQ R8, $0x08 + CMPQ DI, $0x08 JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 - CMPQ R8, $0x10 + CMPQ DI, $0x10 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 - CMPQ R8, $0x20 + CMPQ DI, $0x20 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (AX) + MOVQ (R8), R9 + MOVQ R9, (AX) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (AX) - MOVQ R9, -8(AX)(R8*1) + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) + MOVOU X1, -16(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B memmove_long_repeat_emit_encodeSnappyBlockAsm10B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA 
X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX // matchLen - XORL R11, R11 - CMPL R8, $0x08 + XORL R10, R10 + CMPL DI, $0x08 JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B: - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - TESTQ R10, R10 + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B #ifdef GOAMD64_v3 - TZCNTQ R10, R10 + TZCNTQ R9, R9 #else - BSFQ R10, R10 + BSFQ R9, R9 #endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B: - LEAL -8(R8), R8 - LEAL 8(R11), R11 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B - SUBL $0x04, R8 - LEAL 4(R11), R11 + SUBL $0x04, DI + LEAL 4(R10), R10 matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B - SUBL $0x02, R8 - LEAL 2(R11), R11 + SUBL $0x02, DI + LEAL 2(R10), R10 matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL repeat_extend_forward_end_encodeSnappyBlockAsm10B - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B - LEAL 1(R11), R11 + LEAL 1(R10), R10 repeat_extend_forward_end_encodeSnappyBlockAsm10B: - ADDL R11, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI // emitCopy two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B - CMPL DI, $0x00000800 + CMPL SI, $0x00000800 JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B - MOVB $0x01, BL - LEAL -16(BX)(SI*4), 
SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeSnappyBlockAsm10B emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeSnappyBlockAsm10B: @@ -13052,16 +13033,16 @@ repeat_end_emit_encodeSnappyBlockAsm10B: JMP search_loop_encodeSnappyBlockAsm10B no_repeat_found_encodeSnappyBlockAsm10B: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBlockAsm10B - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeSnappyBlockAsm10B - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeSnappyBlockAsm10B MOVL 20(SP), CX JMP search_loop_encodeSnappyBlockAsm10B @@ -13071,288 +13052,288 @@ candidate3_match_encodeSnappyBlockAsm10B: JMP candidate_match_encodeSnappyBlockAsm10B candidate2_match_encodeSnappyBlockAsm10B: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBlockAsm10B match_extend_back_loop_encodeSnappyBlockAsm10B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBlockAsm10B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBlockAsm10B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBlockAsm10B JMP match_extend_back_loop_encodeSnappyBlockAsm10B match_extend_back_end_encodeSnappyBlockAsm10B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBlockAsm10B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBlockAsm10B: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeSnappyBlockAsm10B - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBlockAsm10B MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm10B two_bytes_match_emit_encodeSnappyBlockAsm10B: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeSnappyBlockAsm10B JMP memmove_long_match_emit_encodeSnappyBlockAsm10B one_byte_match_emit_encodeSnappyBlockAsm10B: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBlockAsm10B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B memmove_long_match_emit_encodeSnappyBlockAsm10B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeSnappyBlockAsm10B: match_nolit_loop_encodeSnappyBlockAsm10B: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ 
src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBlockAsm10B matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeSnappyBlockAsm10B matchlen_loop_match_nolit_encodeSnappyBlockAsm10B: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B JZ match_nolit_end_encodeSnappyBlockAsm10B matchlen_match4_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBlockAsm10B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBlockAsm10B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeSnappyBlockAsm10B - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeSnappyBlockAsm10B - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeSnappyBlockAsm10B: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B - CMPL SI, $0x00000800 + CMPL BX, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: CMPL CX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm10B - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeSnappyBlockAsm10B MOVQ $0x00000000, ret+48(FP) RET 
match_nolit_dst_ok_encodeSnappyBlockAsm10B: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x36, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x36, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x36, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x36, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeSnappyBlockAsm10B INCL CX JMP search_loop_encodeSnappyBlockAsm10B @@ -13537,8 +13518,8 @@ zero_loop_encodeSnappyBlockAsm8B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -13548,276 +13529,276 @@ zero_loop_encodeSnappyBlockAsm8B: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBlockAsm8B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 4(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm8B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x38, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x38, R11 - MOVL 24(SP)(R10*4), SI - MOVL 24(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - LEAL 1(CX), R10 - MOVL R10, 24(SP)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 SHLQ $0x20, R10 - IMULQ R9, R10 + IMULQ R8, R10 SHRQ $0x38, R10 - MOVL CX, R9 - SUBL 16(SP), R9 - MOVL 1(DX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm8B - LEAL 1(CX), DI - MOVL 12(SP), SI - MOVL DI, R8 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + MOVL CX, R8 SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm8B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI JZ repeat_extend_back_end_encodeSnappyBlockAsm8B repeat_extend_back_loop_encodeSnappyBlockAsm8B: - CMPL DI, SI + CMPL SI, BX JLE repeat_extend_back_end_encodeSnappyBlockAsm8B - MOVB -1(DX)(R8*1), BL - MOVB -1(DX)(DI*1), R9 - CMPB BL, R9 + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 JNE repeat_extend_back_end_encodeSnappyBlockAsm8B - LEAL -1(DI), DI - DECL R8 + LEAL -1(SI), SI + DECL DI JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B repeat_extend_back_end_encodeSnappyBlockAsm8B: - MOVL 12(SP), SI - CMPL SI, DI + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c JLT one_byte_repeat_emit_encodeSnappyBlockAsm8B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_repeat_emit_encodeSnappyBlockAsm8B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP 
memmove_long_repeat_emit_encodeSnappyBlockAsm8B two_bytes_repeat_emit_encodeSnappyBlockAsm8B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_repeat_emit_encodeSnappyBlockAsm8B JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B one_byte_repeat_emit_encodeSnappyBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_repeat_emit_encodeSnappyBlockAsm8B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveShort - CMPQ R8, $0x08 + CMPQ DI, $0x08 JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8 - CMPQ R8, $0x10 + CMPQ DI, $0x10 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 - CMPQ R8, $0x20 + CMPQ DI, $0x20 JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (AX) + MOVQ (R8), R9 + MOVQ R9, (AX) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (AX) - MOVQ R9, -8(AX)(R8*1) + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) + MOVOU X1, -16(AX)(DI*1) JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B memmove_long_repeat_emit_encodeSnappyBlockAsm8B: - LEAQ (AX)(R8*1), SI + LEAQ (AX)(DI*1), BX // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, 
-16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B: ADDL $0x05, CX - MOVL CX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), SI + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX // matchLen - XORL R11, R11 - CMPL R8, $0x08 + XORL R10, R10 + CMPL DI, $0x08 JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B: - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - TESTQ R10, R10 + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B #ifdef GOAMD64_v3 - TZCNTQ R10, R10 + TZCNTQ R9, R9 #else - BSFQ R10, R10 + BSFQ R9, R9 #endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B: - LEAL -8(R8), R8 - LEAL 8(R11), R11 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B - SUBL $0x04, R8 - LEAL 4(R11), R11 + SUBL $0x04, DI + LEAL 4(R10), R10 matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B - SUBL $0x02, R8 - LEAL 2(R11), R11 + SUBL $0x02, DI + LEAL 2(R10), R10 matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL repeat_extend_forward_end_encodeSnappyBlockAsm8B - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B - LEAL 1(R11), R11 + LEAL 1(R10), R10 repeat_extend_forward_end_encodeSnappyBlockAsm8B: - ADDL R11, CX - MOVL CX, SI - SUBL DI, SI - MOVL 16(SP), DI + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI // emitCopy two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B: - CMPL SI, $0x40 + CMPL BX, $0x40 JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B MOVB $0xee, (AX) - MOVW DI, 1(AX) - LEAL -60(SI), SI + MOVW SI, 1(AX) + LEAL -60(BX), BX ADDQ $0x03, AX JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B: - CMPL SI, $0x0c + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B - MOVB $0x01, BL - LEAL -16(BX)(SI*4), SI - MOVB DI, 1(AX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL 
SI, DI + MOVB DI, (AX) ADDQ $0x02, AX JMP repeat_end_emit_encodeSnappyBlockAsm8B emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B: - MOVB $0x02, BL - LEAL -4(BX)(SI*4), SI - MOVB SI, (AX) - MOVW DI, 1(AX) + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) ADDQ $0x03, AX repeat_end_emit_encodeSnappyBlockAsm8B: @@ -13825,16 +13806,16 @@ repeat_end_emit_encodeSnappyBlockAsm8B: JMP search_loop_encodeSnappyBlockAsm8B no_repeat_found_encodeSnappyBlockAsm8B: - CMPL (DX)(SI*1), DI + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBlockAsm8B - SHRQ $0x08, DI - MOVL 24(SP)(R10*4), SI - LEAL 2(CX), R9 - CMPL (DX)(R8*1), DI + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI JEQ candidate2_match_encodeSnappyBlockAsm8B - MOVL R9, 24(SP)(R10*4) - SHRQ $0x08, DI - CMPL (DX)(SI*1), DI + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI JEQ candidate3_match_encodeSnappyBlockAsm8B MOVL 20(SP), CX JMP search_loop_encodeSnappyBlockAsm8B @@ -13844,286 +13825,286 @@ candidate3_match_encodeSnappyBlockAsm8B: JMP candidate_match_encodeSnappyBlockAsm8B candidate2_match_encodeSnappyBlockAsm8B: - MOVL R9, 24(SP)(R10*4) + MOVL R8, 24(SP)(R9*4) INCL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBlockAsm8B match_extend_back_loop_encodeSnappyBlockAsm8B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBlockAsm8B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBlockAsm8B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBlockAsm8B JMP match_extend_back_loop_encodeSnappyBlockAsm8B match_extend_back_end_encodeSnappyBlockAsm8B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBlockAsm8B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBlockAsm8B: - MOVL CX, DI - MOVL 12(SP), R8 - CMPL R8, DI + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c JLT one_byte_match_emit_encodeSnappyBlockAsm8B - CMPL R8, $0x00000100 + CMPL DI, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBlockAsm8B MOVB $0xf4, (AX) - MOVW R8, 1(AX) + MOVW DI, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBlockAsm8B two_bytes_match_emit_encodeSnappyBlockAsm8B: MOVB $0xf0, (AX) - MOVB R8, 1(AX) + MOVB DI, 1(AX) ADDQ $0x02, AX - CMPL R8, $0x40 + CMPL DI, $0x40 JL memmove_match_emit_encodeSnappyBlockAsm8B JMP memmove_long_match_emit_encodeSnappyBlockAsm8B one_byte_match_emit_encodeSnappyBlockAsm8B: - SHLB $0x02, R8 - MOVB R8, (AX) + SHLB $0x02, DI + MOVB DI, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBlockAsm8B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 JMP 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (AX) + MOVQ (SI), R9 + MOVQ R9, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (AX) - MOVQ DI, -8(AX)(R9*1) + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBlockAsm8B: - MOVQ R8, AX + MOVQ DI, AX JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B memmove_long_match_emit_encodeSnappyBlockAsm8B: - LEAQ (AX)(R9*1), R8 + LEAQ (AX)(R8*1), DI // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ AX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(AX)(R12*1), R13 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) ADDQ $0x20, R12 - DECQ R11 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(AX)(R12*1) - MOVOA X5, -16(AX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R8, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX emit_literal_done_match_emit_encodeSnappyBlockAsm8B: match_nolit_loop_encodeSnappyBlockAsm8B: - MOVL CX, DI - SUBL SI, DI - MOVL DI, 16(SP) + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL CX, DI - LEAQ (DX)(CX*1), R8 - LEAQ (DX)(SI*1), SI + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX // matchLen - XORL R10, R10 - CMPL 
DI, $0x08 + XORL R9, R9 + CMPL SI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBlockAsm8B matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B: - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - TESTQ R9, R9 + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B #ifdef GOAMD64_v3 - TZCNTQ R9, R9 + TZCNTQ R8, R8 #else - BSFQ R9, R9 + BSFQ R8, R8 #endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 JMP match_nolit_end_encodeSnappyBlockAsm8B matchlen_loop_match_nolit_encodeSnappyBlockAsm8B: - LEAL -8(DI), DI - LEAL 8(R10), R10 - CMPL DI, $0x08 + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B JZ match_nolit_end_encodeSnappyBlockAsm8B matchlen_match4_match_nolit_encodeSnappyBlockAsm8B: - CMPL DI, $0x04 + CMPL SI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBlockAsm8B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B - SUBL $0x04, DI - LEAL 4(R10), R10 + SUBL $0x04, SI + LEAL 4(R9), R9 matchlen_match2_match_nolit_encodeSnappyBlockAsm8B: - CMPL DI, $0x02 + CMPL SI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBlockAsm8B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B - SUBL $0x02, DI - LEAL 2(R10), R10 + SUBL $0x02, SI + LEAL 2(R9), R9 matchlen_match1_match_nolit_encodeSnappyBlockAsm8B: - CMPL DI, $0x01 + CMPL SI, $0x01 JL match_nolit_end_encodeSnappyBlockAsm8B - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 JNE match_nolit_end_encodeSnappyBlockAsm8B - LEAL 1(R10), R10 + LEAL 1(R9), R9 match_nolit_end_encodeSnappyBlockAsm8B: - ADDL R10, CX - MOVL 16(SP), SI - ADDL $0x04, R10 + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: - CMPL R10, $0x40 + CMPL R9, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B MOVB $0xee, (AX) - MOVW SI, 1(AX) - LEAL -60(R10), R10 + MOVW BX, 1(AX) + LEAL -60(R9), R9 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: - CMPL R10, $0x0c + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B - MOVB $0x01, BL - LEAL -16(BX)(R10*4), R10 - MOVB SI, 1(AX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (AX) + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: - MOVB $0x02, BL - LEAL -4(BX)(R10*4), R10 - MOVB R10, (AX) - MOVW SI, 1(AX) + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: CMPL CX, 8(SP) JGE emit_remainder_encodeSnappyBlockAsm8B - MOVQ -2(DX)(CX*1), DI + MOVQ -2(DX)(CX*1), SI CMPQ AX, (SP) JL match_nolit_dst_ok_encodeSnappyBlockAsm8B MOVQ $0x00000000, ret+48(FP) RET match_nolit_dst_ok_encodeSnappyBlockAsm8B: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x38, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x38, SI - LEAL -2(CX), R9 - LEAQ 24(SP)(SI*4), R10 - MOVL (R10), SI - MOVL R9, 24(SP)(R8*4) - MOVL CX, (R10) - CMPL (DX)(SI*1), DI + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI 
+ SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x38, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x38, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI JEQ match_nolit_loop_encodeSnappyBlockAsm8B INCL CX JMP search_loop_encodeSnappyBlockAsm8B @@ -14287,9 +14268,9 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: // func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int // Requires: BMI, SSE2 -TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56 +TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56 MOVQ dst_base+0(FP), AX - MOVQ $0x00000a00, CX + MOVQ $0x00001200, CX LEAQ 24(SP), DX PXOR X0, X0 @@ -14308,8 +14289,8 @@ zero_loop_encodeSnappyBetterBlockAsm: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -14319,359 +14300,369 @@ zero_loop_encodeSnappyBetterBlockAsm: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBetterBlockAsm: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x07, SI - CMPL SI, $0x63 + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 JLE check_maxskip_ok_encodeSnappyBetterBlockAsm - LEAL 100(CX), SI + LEAL 100(CX), BX JMP check_maxskip_cont_encodeSnappyBetterBlockAsm check_maxskip_ok_encodeSnappyBetterBlockAsm: - LEAL 1(CX)(SI*1), SI + LEAL 1(CX)(BX*1), BX check_maxskip_cont_encodeSnappyBetterBlockAsm: - CMPL SI, 8(SP) + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBetterBlockAsm - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x00cf1bbcdcbfa563, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeSnappyBetterBlockAsm - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm candidateS_match_encodeSnappyBetterBlockAsm: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBetterBlockAsm DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBetterBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm match_extend_back_loop_encodeSnappyBetterBlockAsm: - CMPL CX, DI + CMPL CX, SI JLE 
match_extend_back_end_encodeSnappyBetterBlockAsm - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBetterBlockAsm LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm JMP match_extend_back_loop_encodeSnappyBetterBlockAsm match_extend_back_end_encodeSnappyBetterBlockAsm: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 5(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBetterBlockAsm MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBetterBlockAsm: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeSnappyBetterBlockAsm matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm JZ match_nolit_end_encodeSnappyBetterBlockAsm matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeSnappyBetterBlockAsm - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeSnappyBetterBlockAsm - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeSnappyBetterBlockAsm: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - CMPL R12, $0x01 + CMPL R11, $0x01 JG match_length_ok_encodeSnappyBetterBlockAsm - CMPL R8, $0x0000ffff + CMPL DI, $0x0000ffff JLE match_length_ok_encodeSnappyBetterBlockAsm MOVL 20(SP), CX INCL CX JMP search_loop_encodeSnappyBetterBlockAsm match_length_ok_encodeSnappyBetterBlockAsm: - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL 
SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeSnappyBetterBlockAsm - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm - CMPL SI, $0x00010000 + CMPL BX, $0x00010000 JLT three_bytes_match_emit_encodeSnappyBetterBlockAsm - CMPL SI, $0x01000000 + CMPL BX, $0x01000000 JLT four_bytes_match_emit_encodeSnappyBetterBlockAsm MOVB $0xfc, (AX) - MOVL SI, 1(AX) + MOVL BX, 1(AX) ADDQ $0x05, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm four_bytes_match_emit_encodeSnappyBetterBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 + MOVL BX, R10 + SHRL $0x10, R10 MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB R11, 3(AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) ADDQ $0x04, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm three_bytes_match_emit_encodeSnappyBetterBlockAsm: MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm two_bytes_match_emit_encodeSnappyBetterBlockAsm: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeSnappyBetterBlockAsm JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm one_byte_match_emit_encodeSnappyBetterBlockAsm: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBetterBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm memmove_long_match_emit_encodeSnappyBetterBlockAsm: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, 
R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy - CMPL R8, $0x00010000 + CMPL DI, $0x00010000 JL two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm MOVB $0xff, (AX) - MOVL R8, 1(AX) - LEAL -64(R12), R12 + MOVL DI, 1(AX) + LEAL -64(R11), R11 ADDQ $0x05, AX - CMPL R12, $0x04 + CMPL R11, $0x04 JL four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: - TESTL R12, R12 + TESTL R11, R11 JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm - MOVB $0x03, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVL R8, 1(AX) + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) ADDQ $0x05, AX JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - 
MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: @@ -14683,54 +14674,51 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: RET match_nolit_dst_ok_encodeSnappyBetterBlockAsm: - MOVQ $0x00cf1bbcdcbfa563, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x32, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ SI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x08, DI + IMULQ BX, DI + SHRQ $0x2f, DI + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm emit_remainder_encodeSnappyBetterBlockAsm: MOVQ src_len+32(FP), CX @@ -14931,8 +14919,8 @@ zero_loop_encodeSnappyBetterBlockAsm64K: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -14942,299 +14930,309 @@ zero_loop_encodeSnappyBetterBlockAsm64K: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBetterBlockAsm64K: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x07, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBetterBlockAsm64K - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x00cf1bbcdcbfa563, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL 24(SP)(R10*4), SI - MOVL 262168(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 262168(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x30, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 262168(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 262168(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ 
candidate_match_encodeSnappyBetterBlockAsm64K - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm64K - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm64K + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K candidateS_match_encodeSnappyBetterBlockAsm64K: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x30, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x30, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBetterBlockAsm64K DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBetterBlockAsm64K: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K match_extend_back_loop_encodeSnappyBetterBlockAsm64K: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBetterBlockAsm64K - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K match_extend_back_end_encodeSnappyBetterBlockAsm64K: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBetterBlockAsm64K MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBetterBlockAsm64K: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeSnappyBetterBlockAsm64K matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K JZ match_nolit_end_encodeSnappyBetterBlockAsm64K matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K: - 
CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeSnappyBetterBlockAsm64K - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeSnappyBetterBlockAsm64K - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeSnappyBetterBlockAsm64K: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeSnappyBetterBlockAsm64K - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm64K MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeSnappyBetterBlockAsm64K JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K one_byte_match_emit_encodeSnappyBetterBlockAsm64K: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBetterBlockAsm64K: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) 
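Reviewer note: the emit_lit_memmove_*_memmove_move_* blocks above replace a generic copy for short literals with overlapping loads: for 8..16 bytes, load the first and the last 8 bytes and store both, so the middle is covered for any length in range; the 17..32 and 33..64 cases do the same with one or two XMM pairs. A Go sketch of the 8-through-16 case (hypothetical helper; uses encoding/binary as in the matchLen sketch above):

    // move8through16 copies 8..16 bytes the way *_memmove_move_8through16
    // does: two possibly-overlapping 8-byte loads, then two stores.
    func move8through16(dst, src []byte) {
        n := len(src)
        first := binary.LittleEndian.Uint64(src)
        last := binary.LittleEndian.Uint64(src[n-8:])
        binary.LittleEndian.PutUint64(dst, first)
        binary.LittleEndian.PutUint64(dst[n-8:], last)
    }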
+ MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: @@ -15246,54 +15244,51 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: RET match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: - MOVQ $0x00cf1bbcdcbfa563, SI - MOVQ $0x9e3779b1, R8 - INCL DI - 
MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x30, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x30, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x32, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 262168(SP)(R11*4) - MOVL R15, 262168(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x30, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R13 - IMULQ SI, R13 - SHRQ $0x30, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 262168(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm64K + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 262168(SP)(R10*4) + MOVL R13, 262168(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ SI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x08, DI + IMULQ BX, DI + SHRQ $0x30, DI + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x30, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm64K emit_remainder_encodeSnappyBetterBlockAsm64K: MOVQ src_len+32(FP), CX @@ -15475,8 +15470,8 @@ zero_loop_encodeSnappyBetterBlockAsm12B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -15486,299 +15481,309 @@ zero_loop_encodeSnappyBetterBlockAsm12B: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBetterBlockAsm12B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBetterBlockAsm12B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x34, R11 - MOVL 24(SP)(R10*4), SI - MOVL 65560(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 65560(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 65560(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 65560(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeSnappyBetterBlockAsm12B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm12B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm12B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm12B + 
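Reviewer note: the regenerated search_loop_* above also changes candidate checking: both the long-hash and short-hash candidates are now loaded as full quadwords and compared with CMPQ first, and only if neither agrees on 8 bytes does no_short_found_* retry the old 32-bit CMPL checks. Roughly, in Go (shift amounts follow the 12B variant; all names illustrative):

    // pickCandidate sketches the new candidate selection: prefer an 8-byte
    // agreement on either table before falling back to the 4-byte checks
    // the previous generation used exclusively.
    func pickCandidate(src []byte, s int, tableLong, tableShort []uint32) (candidate int, full8 bool) {
        const primeLong, primeShort = 0x0000cf1bbcdcbf9b, 0x9e3779b1
        hashL := func(v uint64) uint32 { return uint32(((v << 16) * primeLong) >> 50) }  // SHLQ $0x10 / SHRQ $0x32
        hashS := func(v uint64) uint32 { return uint32(((v << 32) * primeShort) >> 52) } // SHLQ $0x20 / SHRQ $0x34
        cv := binary.LittleEndian.Uint64(src[s:])
        cl := int(tableLong[hashL(cv)])
        cs := int(tableShort[hashS(cv)])
        tableLong[hashL(cv)], tableShort[hashS(cv)] = uint32(s), uint32(s)
        switch {
        case binary.LittleEndian.Uint64(src[cl:]) == cv: // CMPQ hit, long table
            return cl, true
        case binary.LittleEndian.Uint64(src[cs:]) == cv: // CMPQ hit, short table
            return cs, true
        case uint32(binary.LittleEndian.Uint64(src[cl:])) == uint32(cv): // no_short_found_*: CMPL
            return cl, false
        case uint32(binary.LittleEndian.Uint64(src[cs:])) == uint32(cv): // candidateS_match: asm then retries one byte further in
            return cs, false
        }
        return 0, false
    }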
+no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B candidateS_match_encodeSnappyBetterBlockAsm12B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBetterBlockAsm12B DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBetterBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B match_extend_back_loop_encodeSnappyBetterBlockAsm12B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBetterBlockAsm12B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B match_extend_back_end_encodeSnappyBetterBlockAsm12B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBetterBlockAsm12B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBetterBlockAsm12B: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeSnappyBetterBlockAsm12B matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B JZ match_nolit_end_encodeSnappyBetterBlockAsm12B matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), 
R11 matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeSnappyBetterBlockAsm12B - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeSnappyBetterBlockAsm12B - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeSnappyBetterBlockAsm12B: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeSnappyBetterBlockAsm12B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm12B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeSnappyBetterBlockAsm12B JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B one_byte_match_emit_encodeSnappyBetterBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBetterBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - 
MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: @@ -15790,54 +15795,51 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: RET match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 + MOVQ 
$0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x32, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x34, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 65560(SP)(R11*4) - MOVL R15, 65560(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x32, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 65560(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm12B + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 65560(SP)(R10*4) + MOVL R13, 65560(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ SI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x10, DI + IMULQ BX, DI + SHRQ $0x32, DI + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm12B emit_remainder_encodeSnappyBetterBlockAsm12B: MOVQ src_len+32(FP), CX @@ -16019,8 +16021,8 @@ zero_loop_encodeSnappyBetterBlockAsm10B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -16030,299 +16032,309 @@ zero_loop_encodeSnappyBetterBlockAsm10B: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBetterBlockAsm10B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBetterBlockAsm10B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x36, R11 - MOVL 24(SP)(R10*4), SI - MOVL 16408(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 16408(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 16408(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 16408(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeSnappyBetterBlockAsm10B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm10B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm10B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm10B + +no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B candidateS_match_encodeSnappyBetterBlockAsm10B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ 
$0x34, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBetterBlockAsm10B DECL CX - MOVL R8, SI + MOVL DI, BX candidate_match_encodeSnappyBetterBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B match_extend_back_loop_encodeSnappyBetterBlockAsm10B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBetterBlockAsm10B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B match_extend_back_end_encodeSnappyBetterBlockAsm10B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBetterBlockAsm10B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBetterBlockAsm10B: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeSnappyBetterBlockAsm10B matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B JZ match_nolit_end_encodeSnappyBetterBlockAsm10B matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeSnappyBetterBlockAsm10B - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeSnappyBetterBlockAsm10B - LEAL 1(R12), R12 + LEAL 1(R11), R11 
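Reviewer note: match_extend_back_loop_* above is behaviourally unchanged by this regeneration (only re-registered): it grows a found match to the left while the byte before the current position still equals the byte before the candidate, stopping at the last emit point or at candidate zero. In Go (names illustrative):

    // extendBack mirrors match_extend_back_loop_*.
    func extendBack(src []byte, candidate, s, nextEmit int) (int, int) {
        for s > nextEmit && candidate > 0 && src[s-1] == src[candidate-1] {
            s--
            candidate--
        }
        return candidate, s
    }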
match_nolit_end_encodeSnappyBetterBlockAsm10B: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeSnappyBetterBlockAsm10B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm10B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeSnappyBetterBlockAsm10B JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B one_byte_match_emit_encodeSnappyBetterBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBetterBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 
+ ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) ADDQ $0x20, R14 - DECQ R13 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B - CMPL R8, $0x00000800 + CMPL DI, $0x00000800 JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: @@ -16334,54 +16346,51 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: RET match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x34, R11 
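Reviewer note: the emitCopy rewrite visible above computes the Snappy tag arithmetically (SHLL $0x02, then LEAL -15 for the 2-byte form or LEAL -2 for the 3-byte form) instead of seeding BL with a tag constant; that is what frees BX for the wholesale register renaming in this regeneration. The emitted encodings are the standard Snappy copy tags; a Go sketch (uses encoding/binary as above):

    // emitCopy sketches the two copy encodings used above. The 2-byte form
    // needs offset < 2048 and 4 <= length < 12; otherwise the 3-byte form
    // (16-bit offset, length <= 64) is used, with longer matches split off
    // as maximal 3-byte copies first (the 0xee tag = length 60).
    func emitCopy(dst []byte, offset, length int) int {
        i := 0
        for length > 64 {
            dst[i] = 0xee // (60-1)<<2 | tagCopy2
            binary.LittleEndian.PutUint16(dst[i+1:], uint16(offset))
            i, length = i+3, length-60
        }
        if length >= 12 || offset >= 2048 {
            dst[i] = uint8(length-1)<<2 | 2 // the LEAL -2(len<<2)
            binary.LittleEndian.PutUint16(dst[i+1:], uint16(offset))
            return i + 3
        }
        // The LEAL -15(len<<2): (length-4)<<2 | tagCopy1, with the high
        // three offset bits folded into bits 5..7 of the tag byte.
        dst[i] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | 1
        dst[i+1] = uint8(offset)
        return i + 2
    }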
SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x36, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 16408(SP)(R11*4) - MOVL R15, 16408(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x34, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 16408(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP search_loop_encodeSnappyBetterBlockAsm10B + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 16408(SP)(R10*4) + MOVL R13, 16408(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ SI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x10, DI + IMULQ BX, DI + SHRQ $0x34, DI + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm10B emit_remainder_encodeSnappyBetterBlockAsm10B: MOVQ src_len+32(FP), CX @@ -16563,8 +16572,8 @@ zero_loop_encodeSnappyBetterBlockAsm8B: MOVL $0x00000000, 12(SP) MOVQ src_len+32(FP), CX LEAQ -9(CX), DX - LEAQ -8(CX), SI - MOVL SI, 8(SP) + LEAQ -8(CX), BX + MOVL BX, 8(SP) SHRQ $0x05, CX SUBL CX, DX LEAQ (AX)(DX*1), DX @@ -16574,297 +16583,307 @@ zero_loop_encodeSnappyBetterBlockAsm8B: MOVQ src_base+24(FP), DX search_loop_encodeSnappyBetterBlockAsm8B: - MOVL CX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 1(CX)(SI*1), SI - CMPL SI, 8(SP) + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) JGE emit_remainder_encodeSnappyBetterBlockAsm8B - MOVQ (DX)(CX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x38, R11 - MOVL 24(SP)(R10*4), SI - MOVL 4120(SP)(R11*4), R8 - MOVL CX, 24(SP)(R10*4) - MOVL CX, 4120(SP)(R11*4) - CMPL (DX)(SI*1), DI + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 4120(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 4120(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI JEQ candidate_match_encodeSnappyBetterBlockAsm8B - CMPL (DX)(R8*1), DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm8B - MOVL 20(SP), CX - JMP search_loop_encodeSnappyBetterBlockAsm8B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B candidateS_match_encodeSnappyBetterBlockAsm8B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - MOVL 24(SP)(R10*4), SI + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL 24(SP)(R9*4), BX INCL CX - MOVL CX, 24(SP)(R10*4) - CMPL (DX)(SI*1), DI + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI JEQ candidate_match_encodeSnappyBetterBlockAsm8B DECL CX - MOVL R8, SI + MOVL DI, BX 
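Reviewer note: the new index_loop_* blocks replace the previous fixed pattern of four hash-table stores after a match: positions in the skipped span are now inserted into the long-hash table from both ends, stepping inwards by two until the indices cross. Together with the wider long hash in the non-64K variant (SHRQ $0x2f instead of $0x30, i.e. a 2^17-entry table), this accounts for the frame growth from $327704 to $589848 at the top of encodeSnappyBetterBlockAsm. A sketch (hashL as in the pickCandidate sketch; illustrative only):

    // indexSkipped mirrors index_loop_*: seed the long-hash table with
    // positions between the match start and the current position, working
    // inwards from both ends.
    func indexSkipped(src []byte, tableLong []uint32, hashL func(uint64) uint32, start, end int) {
        for i, j := start, end; i < j; i, j = i+2, j-2 {
            tableLong[hashL(binary.LittleEndian.Uint64(src[i:]))] = uint32(i)
            tableLong[hashL(binary.LittleEndian.Uint64(src[j:]))] = uint32(j)
        }
    }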
candidate_match_encodeSnappyBetterBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI + MOVL 12(SP), SI + TESTL BX, BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B match_extend_back_loop_encodeSnappyBetterBlockAsm8B: - CMPL CX, DI + CMPL CX, SI JLE match_extend_back_end_encodeSnappyBetterBlockAsm8B - MOVB -1(DX)(SI*1), BL + MOVB -1(DX)(BX*1), DI MOVB -1(DX)(CX*1), R8 - CMPB BL, R8 + CMPB DI, R8 JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B LEAL -1(CX), CX - DECL SI + DECL BX JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B match_extend_back_end_encodeSnappyBetterBlockAsm8B: - MOVL CX, DI - SUBL 12(SP), DI - LEAQ 3(AX)(DI*1), DI - CMPQ DI, (SP) + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) JL match_dst_size_check_encodeSnappyBetterBlockAsm8B MOVQ $0x00000000, ret+48(FP) RET match_dst_size_check_encodeSnappyBetterBlockAsm8B: - MOVL CX, DI + MOVL CX, SI ADDL $0x04, CX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL CX, R8 - LEAQ (DX)(CX*1), R9 - LEAQ (DX)(SI*1), R10 + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 // matchLen - XORL R12, R12 - CMPL R8, $0x08 + XORL R11, R11 + CMPL DI, $0x08 JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B: - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - TESTQ R11, R11 + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B #ifdef GOAMD64_v3 - TZCNTQ R11, R11 + TZCNTQ R10, R10 #else - BSFQ R11, R11 + BSFQ R10, R10 #endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 JMP match_nolit_end_encodeSnappyBetterBlockAsm8B matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B: - LEAL -8(R8), R8 - LEAL 8(R12), R12 - CMPL R8, $0x08 + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B JZ match_nolit_end_encodeSnappyBetterBlockAsm8B matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x04 + CMPL DI, $0x04 JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B - SUBL $0x04, R8 - LEAL 4(R12), R12 + SUBL $0x04, DI + LEAL 4(R11), R11 matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x02 + CMPL DI, $0x02 JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B - SUBL $0x02, R8 - LEAL 2(R12), R12 + SUBL $0x02, DI + LEAL 2(R11), R11 matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x01 + CMPL DI, $0x01 JL match_nolit_end_encodeSnappyBetterBlockAsm8B - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 JNE match_nolit_end_encodeSnappyBetterBlockAsm8B - LEAL 1(R12), R12 + LEAL 1(R11), R11 match_nolit_end_encodeSnappyBetterBlockAsm8B: - MOVL CX, R8 - SUBL SI, R8 + MOVL CX, DI + SUBL BX, DI // Check if repeat - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (DX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c + 
MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c JLT one_byte_match_emit_encodeSnappyBetterBlockAsm8B - CMPL SI, $0x00000100 + CMPL BX, $0x00000100 JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm8B MOVB $0xf4, (AX) - MOVW SI, 1(AX) + MOVW BX, 1(AX) ADDQ $0x03, AX JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: MOVB $0xf0, (AX) - MOVB SI, 1(AX) + MOVB BL, 1(AX) ADDQ $0x02, AX - CMPL SI, $0x40 + CMPL BX, $0x40 JL memmove_match_emit_encodeSnappyBetterBlockAsm8B JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B one_byte_match_emit_encodeSnappyBetterBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (AX) + SHLB $0x02, BL + MOVB BL, (AX) ADDQ $0x01, AX memmove_match_emit_encodeSnappyBetterBlockAsm8B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveShort - CMPQ R9, $0x08 + CMPQ R8, $0x08 JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 + CMPQ R8, $0x10 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 + CMPQ R8, $0x20 JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (AX) + MOVQ (R9), R10 + MOVQ R10, (AX) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (AX) - MOVQ R10, -8(AX)(R9*1) + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) + MOVOU X1, -16(AX)(R8*1) JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: - MOVQ SI, AX + MOVQ BX, AX JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: - LEAQ (AX)(R9*1), SI + LEAQ (AX)(R8*1), BX // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(AX)(R14*1), R15 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - 
MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 MOVOU X0, (AX) MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ SI, AX + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: - ADDL R12, CX - ADDL $0x04, R12 + ADDL R11, CX + ADDL $0x04, R11 MOVL CX, 12(SP) // emitCopy two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R12, $0x40 + CMPL R11, $0x40 JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R12), R12 + MOVW DI, 1(AX) + LEAL -60(R11), R11 ADDQ $0x03, AX JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R12, $0x0c + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B - MOVB $0x01, BL - LEAL -16(BX)(R12*4), R12 - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (AX) + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) ADDQ $0x02, AX JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: - MOVB $0x02, BL - LEAL -4(BX)(R12*4), R12 - MOVB R12, (AX) - MOVW R8, 1(AX) + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) ADDQ $0x03, AX match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: @@ -16876,54 +16895,51 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: RET match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - INCL DI - MOVQ (DX)(DI*1), R9 - MOVQ R9, R10 - MOVQ R9, R11 - MOVQ R9, R12 - SHRQ $0x08, R11 - MOVQ R11, R13 - SHRQ $0x10, R12 - LEAL 1(DI), R14 - LEAL 2(DI), R15 - MOVQ -2(DX)(CX*1), R9 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x38, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x36, R11 SHLQ $0x20, R12 - IMULQ R8, R12 + IMULQ DI, R12 SHRQ $0x38, R12 - MOVL DI, 24(SP)(R10*4) - MOVL R14, 24(SP)(R13*4) - MOVL R14, 4120(SP)(R11*4) - MOVL R15, 4120(SP)(R12*4) - MOVQ R9, R10 - MOVQ R9, R11 - SHRQ $0x08, R11 - MOVQ R11, R13 - LEAL -2(CX), R9 - LEAL -1(CX), DI - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R13 - IMULQ SI, R13 - SHRQ $0x36, R13 - MOVL R9, 24(SP)(R10*4) - MOVL DI, 4120(SP)(R11*4) - MOVL DI, 24(SP)(R13*4) - JMP 
search_loop_encodeSnappyBetterBlockAsm8B + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 4120(SP)(R10*4) + MOVL R13, 4120(SP)(R12*4) + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ SI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(SI*1), DI + MOVQ (DX)(R8*1), R9 + SHLQ $0x10, DI + IMULQ BX, DI + SHRQ $0x36, DI + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + MOVL SI, 24(SP)(DI*4) + MOVL R8, 24(SP)(R9*4) + ADDQ $0x02, SI + SUBQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm8B emit_remainder_encodeSnappyBetterBlockAsm8B: MOVQ src_len+32(FP), CX @@ -17082,698 +17098,3194 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: MOVQ AX, ret+48(FP) RET -// func emitLiteral(dst []byte, lit []byte) int -// Requires: SSE2 -TEXT ·emitLiteral(SB), NOSPLIT, $0-56 - MOVQ lit_len+32(FP), DX - MOVQ dst_base+0(FP), AX - MOVQ lit_base+24(FP), CX - TESTQ DX, DX - JZ emit_literal_end_standalone_skip - MOVL DX, BX - LEAL -1(DX), SI - CMPL SI, $0x3c - JLT one_byte_standalone - CMPL SI, $0x00000100 - JLT two_bytes_standalone - CMPL SI, $0x00010000 - JLT three_bytes_standalone - CMPL SI, $0x01000000 - JLT four_bytes_standalone - MOVB $0xfc, (AX) - MOVL SI, 1(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP memmove_long_standalone +// func calcBlockSize(src []byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSize(SB), $32792-32 + XORQ AX, AX + MOVQ $0x00000100, CX + LEAQ 24(SP), DX + PXOR X0, X0 -four_bytes_standalone: +zero_loop_calcBlockSize: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_calcBlockSize + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+0(FP), DX + +search_loop_calcBlockSize: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JGE emit_remainder_calcBlockSize + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x33, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x33, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x33, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_calcBlockSize + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_calcBlockSize + +repeat_extend_back_loop_calcBlockSize: + CMPL SI, BX + JLE repeat_extend_back_end_calcBlockSize + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_calcBlockSize + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_calcBlockSize + +repeat_extend_back_end_calcBlockSize: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_calcBlockSize MOVL SI, DI - SHRL $0x10, DI - MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB DI, 3(AX) - ADDQ $0x04, BX + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JLT one_byte_repeat_emit_calcBlockSize + CMPL BX, $0x00000100 + JLT two_bytes_repeat_emit_calcBlockSize + CMPL BX, $0x00010000 + JLT 
three_bytes_repeat_emit_calcBlockSize + CMPL BX, $0x01000000 + JLT four_bytes_repeat_emit_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_calcBlockSize + +four_bytes_repeat_emit_calcBlockSize: ADDQ $0x04, AX - JMP memmove_long_standalone + JMP memmove_long_repeat_emit_calcBlockSize -three_bytes_standalone: - MOVB $0xf4, (AX) - MOVW SI, 1(AX) - ADDQ $0x03, BX +three_bytes_repeat_emit_calcBlockSize: ADDQ $0x03, AX - JMP memmove_long_standalone + JMP memmove_long_repeat_emit_calcBlockSize -two_bytes_standalone: - MOVB $0xf0, (AX) - MOVB SI, 1(AX) - ADDQ $0x02, BX +two_bytes_repeat_emit_calcBlockSize: ADDQ $0x02, AX - CMPL SI, $0x40 - JL memmove_standalone - JMP memmove_long_standalone + CMPL BX, $0x40 + JL memmove_repeat_emit_calcBlockSize + JMP memmove_long_repeat_emit_calcBlockSize -one_byte_standalone: - SHLB $0x02, SI - MOVB SI, (AX) - ADDQ $0x01, BX +one_byte_repeat_emit_calcBlockSize: ADDQ $0x01, AX -memmove_standalone: - // genMemMoveShort - CMPQ DX, $0x03 - JB emit_lit_memmove_standalone_memmove_move_1or2 - JE emit_lit_memmove_standalone_memmove_move_3 - CMPQ DX, $0x08 - JB emit_lit_memmove_standalone_memmove_move_4through7 - CMPQ DX, $0x10 - JBE emit_lit_memmove_standalone_memmove_move_8through16 - CMPQ DX, $0x20 - JBE emit_lit_memmove_standalone_memmove_move_17through32 - JMP emit_lit_memmove_standalone_memmove_move_33through64 +memmove_repeat_emit_calcBlockSize: + LEAQ (AX)(DI*1), AX + JMP emit_literal_done_repeat_emit_calcBlockSize -emit_lit_memmove_standalone_memmove_move_1or2: - MOVB (CX), SI - MOVB -1(CX)(DX*1), CL - MOVB SI, (AX) - MOVB CL, -1(AX)(DX*1) - JMP emit_literal_end_standalone +memmove_long_repeat_emit_calcBlockSize: + LEAQ (AX)(DI*1), AX -emit_lit_memmove_standalone_memmove_move_3: - MOVW (CX), SI - MOVB 2(CX), CL - MOVW SI, (AX) - MOVB CL, 2(AX) - JMP emit_literal_end_standalone +emit_literal_done_repeat_emit_calcBlockSize: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+8(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX -emit_lit_memmove_standalone_memmove_move_4through7: - MOVL (CX), SI - MOVL -4(CX)(DX*1), CX - MOVL SI, (AX) - MOVL CX, -4(AX)(DX*1) - JMP emit_literal_end_standalone + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_repeat_extend_calcBlockSize -emit_lit_memmove_standalone_memmove_move_8through16: - MOVQ (CX), SI - MOVQ -8(CX)(DX*1), CX - MOVQ SI, (AX) - MOVQ CX, -8(AX)(DX*1) - JMP emit_literal_end_standalone +matchlen_loopback_repeat_extend_calcBlockSize: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_calcBlockSize -emit_lit_memmove_standalone_memmove_move_17through32: - MOVOU (CX), X0 - MOVOU -16(CX)(DX*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(DX*1) - JMP emit_literal_end_standalone +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 -emit_lit_memmove_standalone_memmove_move_33through64: - MOVOU (CX), X0 - MOVOU 16(CX), X1 - MOVOU -32(CX)(DX*1), X2 - MOVOU -16(CX)(DX*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(DX*1) - MOVOU X3, -16(AX)(DX*1) - JMP emit_literal_end_standalone - JMP emit_literal_end_standalone +#else + BSFQ R9, R9 -memmove_long_standalone: - // genMemMoveLong - MOVOU (CX), X0 - MOVOU 16(CX), X1 - MOVOU -32(CX)(DX*1), X2 - MOVOU -16(CX)(DX*1), X3 - MOVQ DX, DI - SHRQ $0x05, DI - MOVQ AX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 - LEAQ -32(CX)(R8*1), SI - LEAQ -32(AX)(R8*1), R9 +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 
+ JMP repeat_extend_forward_end_calcBlockSize -emit_lit_memmove_long_standalonelarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_standalonelarge_big_loop_back +matchlen_loop_repeat_extend_calcBlockSize: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_repeat_extend_calcBlockSize + JZ repeat_extend_forward_end_calcBlockSize -emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: - MOVOU -32(CX)(R8*1), X4 - MOVOU -16(CX)(R8*1), X5 - MOVOA X4, -32(AX)(R8*1) - MOVOA X5, -16(AX)(R8*1) - ADDQ $0x20, R8 - CMPQ DX, R8 - JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(DX*1) - MOVOU X3, -16(AX)(DX*1) - JMP emit_literal_end_standalone - JMP emit_literal_end_standalone +matchlen_match4_repeat_extend_calcBlockSize: + CMPL DI, $0x04 + JL matchlen_match2_repeat_extend_calcBlockSize + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_calcBlockSize + SUBL $0x04, DI + LEAL 4(R10), R10 -emit_literal_end_standalone_skip: - XORQ BX, BX +matchlen_match2_repeat_extend_calcBlockSize: + CMPL DI, $0x02 + JL matchlen_match1_repeat_extend_calcBlockSize + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_calcBlockSize + SUBL $0x02, DI + LEAL 2(R10), R10 -emit_literal_end_standalone: - MOVQ BX, ret+48(FP) - RET +matchlen_match1_repeat_extend_calcBlockSize: + CMPL DI, $0x01 + JL repeat_extend_forward_end_calcBlockSize + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_calcBlockSize + LEAL 1(R10), R10 -// func emitRepeat(dst []byte, offset int, length int) int -TEXT ·emitRepeat(SB), NOSPLIT, $0-48 - XORQ BX, BX - MOVQ dst_base+0(FP), AX - MOVQ offset+24(FP), CX - MOVQ length+32(FP), DX +repeat_extend_forward_end_calcBlockSize: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI - // emitRepeat -emit_repeat_again_standalone: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JLE repeat_two_standalone - CMPL SI, $0x0c - JGE cant_repeat_two_offset_standalone - CMPL CX, $0x00000800 - JLT repeat_two_offset_standalone - -cant_repeat_two_offset_standalone: - CMPL DX, $0x00000104 - JLT repeat_three_standalone - CMPL DX, $0x00010100 - JLT repeat_four_standalone - CMPL DX, $0x0100ffff - JLT repeat_five_standalone - LEAL -16842747(DX), DX - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone + // emitCopy + CMPL SI, $0x00010000 + JL two_byte_offset_repeat_as_copy_calcBlockSize -repeat_five_standalone: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX +four_bytes_loop_back_repeat_as_copy_calcBlockSize: + CMPL BX, $0x40 + JLE four_bytes_remain_repeat_as_copy_calcBlockSize + LEAL -64(BX), BX ADDQ $0x05, AX - JMP gen_emit_repeat_end - -repeat_four_standalone: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP gen_emit_repeat_end + CMPL BX, $0x04 + JL four_bytes_remain_repeat_as_copy_calcBlockSize + JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize + +four_bytes_remain_repeat_as_copy_calcBlockSize: + TESTL BX, BX + JZ repeat_end_emit_calcBlockSize + XORL BX, BX + ADDQ $0x05, AX + JMP repeat_end_emit_calcBlockSize -repeat_three_standalone: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX 
+two_byte_offset_repeat_as_copy_calcBlockSize: + CMPL BX, $0x40 + JLE two_byte_offset_short_repeat_as_copy_calcBlockSize + LEAL -60(BX), BX ADDQ $0x03, AX - JMP gen_emit_repeat_end + JMP two_byte_offset_repeat_as_copy_calcBlockSize -repeat_two_standalone: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX +two_byte_offset_short_repeat_as_copy_calcBlockSize: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JGE emit_copy_three_repeat_as_copy_calcBlockSize + CMPL SI, $0x00000800 + JGE emit_copy_three_repeat_as_copy_calcBlockSize ADDQ $0x02, AX - JMP gen_emit_repeat_end + JMP repeat_end_emit_calcBlockSize -repeat_two_offset_standalone: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX +emit_copy_three_repeat_as_copy_calcBlockSize: + ADDQ $0x03, AX -gen_emit_repeat_end: - MOVQ BX, ret+40(FP) - RET +repeat_end_emit_calcBlockSize: + MOVL CX, 12(SP) + JMP search_loop_calcBlockSize + +no_repeat_found_calcBlockSize: + CMPL (DX)(BX*1), SI + JEQ candidate_match_calcBlockSize + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_calcBlockSize + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_calcBlockSize + MOVL 20(SP), CX + JMP search_loop_calcBlockSize -// func emitCopy(dst []byte, offset int, length int) int -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - XORQ BX, BX - MOVQ dst_base+0(FP), AX - MOVQ offset+24(FP), CX - MOVQ length+32(FP), DX +candidate3_match_calcBlockSize: + ADDL $0x02, CX + JMP candidate_match_calcBlockSize - // emitCopy - CMPL CX, $0x00010000 - JL two_byte_offset_standalone +candidate2_match_calcBlockSize: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX -four_bytes_loop_back_standalone: - CMPL DX, $0x40 - JLE four_bytes_remain_standalone - MOVB $0xff, (AX) - MOVL CX, 1(AX) - LEAL -64(DX), DX - ADDQ $0x05, BX - ADDQ $0x05, AX - CMPL DX, $0x04 - JL four_bytes_remain_standalone +candidate_match_calcBlockSize: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_calcBlockSize - // emitRepeat -emit_repeat_again_standalone_emit_copy: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JLE repeat_two_standalone_emit_copy - CMPL SI, $0x0c - JGE cant_repeat_two_offset_standalone_emit_copy - CMPL CX, $0x00000800 - JLT repeat_two_offset_standalone_emit_copy +match_extend_back_loop_calcBlockSize: + CMPL CX, SI + JLE match_extend_back_end_calcBlockSize + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_calcBlockSize + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_calcBlockSize + JMP match_extend_back_loop_calcBlockSize -cant_repeat_two_offset_standalone_emit_copy: - CMPL DX, $0x00000104 - JLT repeat_three_standalone_emit_copy - CMPL DX, $0x00010100 - JLT repeat_four_standalone_emit_copy - CMPL DX, $0x0100ffff - JLT repeat_five_standalone_emit_copy - LEAL -16842747(DX), DX - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone_emit_copy +match_extend_back_end_calcBlockSize: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JL match_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET -repeat_five_standalone_emit_copy: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX +match_dst_size_check_calcBlockSize: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ 
emit_literal_done_match_emit_calcBlockSize + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_calcBlockSize + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_calcBlockSize + CMPL SI, $0x00010000 + JLT three_bytes_match_emit_calcBlockSize + CMPL SI, $0x01000000 + JLT four_bytes_match_emit_calcBlockSize ADDQ $0x05, AX - JMP gen_emit_copy_end + JMP memmove_long_match_emit_calcBlockSize -repeat_four_standalone_emit_copy: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX +four_bytes_match_emit_calcBlockSize: ADDQ $0x04, AX - JMP gen_emit_copy_end + JMP memmove_long_match_emit_calcBlockSize -repeat_three_standalone_emit_copy: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX +three_bytes_match_emit_calcBlockSize: ADDQ $0x03, AX - JMP gen_emit_copy_end + JMP memmove_long_match_emit_calcBlockSize -repeat_two_standalone_emit_copy: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX +two_bytes_match_emit_calcBlockSize: ADDQ $0x02, AX - JMP gen_emit_copy_end + CMPL SI, $0x40 + JL memmove_match_emit_calcBlockSize + JMP memmove_long_match_emit_calcBlockSize -repeat_two_offset_standalone_emit_copy: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - JMP four_bytes_loop_back_standalone +one_byte_match_emit_calcBlockSize: + ADDQ $0x01, AX -four_bytes_remain_standalone: - TESTL DX, DX - JZ gen_emit_copy_end - MOVB $0x03, SI - LEAL -4(SI)(DX*4), DX - MOVB DL, (AX) - MOVL CX, 1(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end +memmove_match_emit_calcBlockSize: + LEAQ (AX)(R8*1), AX + JMP emit_literal_done_match_emit_calcBlockSize -two_byte_offset_standalone: - CMPL DX, $0x40 - JLE two_byte_offset_short_standalone - CMPL CX, $0x00000800 - JAE long_offset_short_standalone - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB CL, 1(AX) - MOVL CX, DI - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - SUBL $0x08, DX +memmove_long_match_emit_calcBlockSize: + LEAQ (AX)(R8*1), AX - // emitRepeat - LEAL -4(DX), DX - JMP cant_repeat_two_offset_standalone_emit_copy_short_2b +emit_literal_done_match_emit_calcBlockSize: +match_nolit_loop_calcBlockSize: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+8(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX -emit_repeat_again_standalone_emit_copy_short_2b: - MOVL DX, SI - LEAL -4(DX), DX + // matchLen + XORL R9, R9 CMPL SI, $0x08 - JLE repeat_two_standalone_emit_copy_short_2b - CMPL SI, $0x0c - JGE cant_repeat_two_offset_standalone_emit_copy_short_2b - CMPL CX, $0x00000800 - JLT repeat_two_offset_standalone_emit_copy_short_2b + JL matchlen_match4_match_nolit_calcBlockSize -cant_repeat_two_offset_standalone_emit_copy_short_2b: - CMPL DX, $0x00000104 - JLT repeat_three_standalone_emit_copy_short_2b - CMPL DX, $0x00010100 - JLT repeat_four_standalone_emit_copy_short_2b - CMPL DX, $0x0100ffff - JLT repeat_five_standalone_emit_copy_short_2b - LEAL -16842747(DX), DX - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone_emit_copy_short_2b +matchlen_loopback_match_nolit_calcBlockSize: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_calcBlockSize -repeat_five_standalone_emit_copy_short_2b: - 
LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_calcBlockSize + +matchlen_loop_match_nolit_calcBlockSize: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JGE matchlen_loopback_match_nolit_calcBlockSize + JZ match_nolit_end_calcBlockSize + +matchlen_match4_match_nolit_calcBlockSize: + CMPL SI, $0x04 + JL matchlen_match2_match_nolit_calcBlockSize + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_calcBlockSize + SUBL $0x04, SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_calcBlockSize: + CMPL SI, $0x02 + JL matchlen_match1_match_nolit_calcBlockSize + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_calcBlockSize + SUBL $0x02, SI + LEAL 2(R9), R9 + +matchlen_match1_match_nolit_calcBlockSize: + CMPL SI, $0x01 + JL match_nolit_end_calcBlockSize + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_calcBlockSize + LEAL 1(R9), R9 + +match_nolit_end_calcBlockSize: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JL two_byte_offset_match_nolit_calcBlockSize + +four_bytes_loop_back_match_nolit_calcBlockSize: + CMPL R9, $0x40 + JLE four_bytes_remain_match_nolit_calcBlockSize + LEAL -64(R9), R9 ADDQ $0x05, AX - JMP gen_emit_copy_end + CMPL R9, $0x04 + JL four_bytes_remain_match_nolit_calcBlockSize + JMP four_bytes_loop_back_match_nolit_calcBlockSize -repeat_four_standalone_emit_copy_short_2b: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP gen_emit_copy_end +four_bytes_remain_match_nolit_calcBlockSize: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_calcBlockSize + XORL BX, BX + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_calcBlockSize -repeat_three_standalone_emit_copy_short_2b: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX +two_byte_offset_match_nolit_calcBlockSize: + CMPL R9, $0x40 + JLE two_byte_offset_short_match_nolit_calcBlockSize + LEAL -60(R9), R9 ADDQ $0x03, AX - JMP gen_emit_copy_end + JMP two_byte_offset_match_nolit_calcBlockSize -repeat_two_standalone_emit_copy_short_2b: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -repeat_two_offset_standalone_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX +two_byte_offset_short_match_nolit_calcBlockSize: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JGE emit_copy_three_match_nolit_calcBlockSize + CMPL BX, $0x00000800 + JGE emit_copy_three_match_nolit_calcBlockSize ADDQ $0x02, AX - JMP gen_emit_copy_end + JMP match_nolit_emitcopy_end_calcBlockSize -long_offset_short_standalone: - MOVB $0xee, (AX) - MOVW CX, 1(AX) - LEAL -60(DX), DX +emit_copy_three_match_nolit_calcBlockSize: ADDQ $0x03, AX - ADDQ $0x03, BX - // emitRepeat -emit_repeat_again_standalone_emit_copy_short: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JLE repeat_two_standalone_emit_copy_short - CMPL SI, $0x0c - JGE cant_repeat_two_offset_standalone_emit_copy_short - CMPL CX, $0x00000800 - JLT repeat_two_offset_standalone_emit_copy_short +match_nolit_emitcopy_end_calcBlockSize: + CMPL CX, 8(SP) + JGE emit_remainder_calcBlockSize + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JL match_nolit_dst_ok_calcBlockSize + 
MOVQ $0x00000000, ret+24(FP) + RET -cant_repeat_two_offset_standalone_emit_copy_short: - CMPL DX, $0x00000104 - JLT repeat_three_standalone_emit_copy_short - CMPL DX, $0x00010100 - JLT repeat_four_standalone_emit_copy_short - CMPL DX, $0x0100ffff - JLT repeat_five_standalone_emit_copy_short - LEAL -16842747(DX), DX - MOVW $0x001d, (AX) - MOVW $0xfffb, 2(AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone_emit_copy_short +match_nolit_dst_ok_calcBlockSize: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x33, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x33, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_calcBlockSize + INCL CX + JMP search_loop_calcBlockSize -repeat_five_standalone_emit_copy_short: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX +emit_remainder_calcBlockSize: + MOVQ src_len+8(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +emit_remainder_ok_calcBlockSize: + MOVQ src_len+8(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_calcBlockSize + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), CX + CMPL CX, $0x3c + JLT one_byte_emit_remainder_calcBlockSize + CMPL CX, $0x00000100 + JLT two_bytes_emit_remainder_calcBlockSize + CMPL CX, $0x00010000 + JLT three_bytes_emit_remainder_calcBlockSize + CMPL CX, $0x01000000 + JLT four_bytes_emit_remainder_calcBlockSize ADDQ $0x05, AX - JMP gen_emit_copy_end + JMP memmove_long_emit_remainder_calcBlockSize -repeat_four_standalone_emit_copy_short: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX +four_bytes_emit_remainder_calcBlockSize: ADDQ $0x04, AX - JMP gen_emit_copy_end + JMP memmove_long_emit_remainder_calcBlockSize -repeat_three_standalone_emit_copy_short: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX +three_bytes_emit_remainder_calcBlockSize: ADDQ $0x03, AX - JMP gen_emit_copy_end + JMP memmove_long_emit_remainder_calcBlockSize -repeat_two_standalone_emit_copy_short: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX +two_bytes_emit_remainder_calcBlockSize: ADDQ $0x02, AX - JMP gen_emit_copy_end + CMPL CX, $0x40 + JL memmove_emit_remainder_calcBlockSize + JMP memmove_long_emit_remainder_calcBlockSize -repeat_two_offset_standalone_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - JMP two_byte_offset_standalone +one_byte_emit_remainder_calcBlockSize: + ADDQ $0x01, AX -two_byte_offset_short_standalone: - CMPL DX, $0x0c - JGE emit_copy_three_standalone - CMPL CX, $0x00000800 - JGE emit_copy_three_standalone - MOVB $0x01, SI - LEAL -16(SI)(DX*4), DX - MOVB CL, 1(AX) - SHRL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end +memmove_emit_remainder_calcBlockSize: + LEAQ (AX)(SI*1), AX + JMP emit_literal_done_emit_remainder_calcBlockSize -emit_copy_three_standalone: - MOVB $0x02, SI - LEAL -4(SI)(DX*4), DX - MOVB DL, (AX) - MOVW CX, 1(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX +memmove_long_emit_remainder_calcBlockSize: + LEAQ (AX)(SI*1), AX 
-gen_emit_copy_end: - MOVQ BX, ret+40(FP) +emit_literal_done_emit_remainder_calcBlockSize: + MOVQ AX, ret+24(FP) RET -// func emitCopyNoRepeat(dst []byte, offset int, length int) int -TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 - XORQ BX, BX - MOVQ dst_base+0(FP), AX - MOVQ offset+24(FP), CX - MOVQ length+32(FP), DX - - // emitCopy - CMPL CX, $0x00010000 - JL two_byte_offset_standalone_snappy - -four_bytes_loop_back_standalone_snappy: - CMPL DX, $0x40 - JLE four_bytes_remain_standalone_snappy - MOVB $0xff, (AX) - MOVL CX, 1(AX) - LEAL -64(DX), DX - ADDQ $0x05, BX - ADDQ $0x05, AX - CMPL DX, $0x04 - JL four_bytes_remain_standalone_snappy - JMP four_bytes_loop_back_standalone_snappy - -four_bytes_remain_standalone_snappy: - TESTL DX, DX - JZ gen_emit_copy_end_snappy - MOVB $0x03, SI - LEAL -4(SI)(DX*4), DX - MOVB DL, (AX) - MOVL CX, 1(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end_snappy +// func calcBlockSizeSmall(src []byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSizeSmall(SB), $2072-32 + XORQ AX, AX + MOVQ $0x00000010, CX + LEAQ 24(SP), DX + PXOR X0, X0 -two_byte_offset_standalone_snappy: - CMPL DX, $0x40 - JLE two_byte_offset_short_standalone_snappy - MOVB $0xee, (AX) - MOVW CX, 1(AX) - LEAL -60(DX), DX +zero_loop_calcBlockSizeSmall: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_calcBlockSizeSmall + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+0(FP), DX + +search_loop_calcBlockSizeSmall: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JGE emit_remainder_calcBlockSizeSmall + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x37, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x37, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x37, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_calcBlockSizeSmall + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_calcBlockSizeSmall + +repeat_extend_back_loop_calcBlockSizeSmall: + CMPL SI, BX + JLE repeat_extend_back_end_calcBlockSizeSmall + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_calcBlockSizeSmall + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_calcBlockSizeSmall + +repeat_extend_back_end_calcBlockSizeSmall: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JLT one_byte_repeat_emit_calcBlockSizeSmall + CMPL BX, $0x00000100 + JLT two_bytes_repeat_emit_calcBlockSizeSmall ADDQ $0x03, AX - ADDQ $0x03, BX - JMP two_byte_offset_standalone_snappy + JMP memmove_long_repeat_emit_calcBlockSizeSmall -two_byte_offset_short_standalone_snappy: - CMPL DX, $0x0c - JGE emit_copy_three_standalone_snappy - CMPL CX, $0x00000800 - JGE emit_copy_three_standalone_snappy - MOVB $0x01, SI - LEAL -16(SI)(DX*4), DX - MOVB CL, 1(AX) - SHRL 
$0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX +two_bytes_repeat_emit_calcBlockSizeSmall: ADDQ $0x02, AX - JMP gen_emit_copy_end_snappy + CMPL BX, $0x40 + JL memmove_repeat_emit_calcBlockSizeSmall + JMP memmove_long_repeat_emit_calcBlockSizeSmall -emit_copy_three_standalone_snappy: - MOVB $0x02, SI - LEAL -4(SI)(DX*4), DX - MOVB DL, (AX) - MOVW CX, 1(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX +one_byte_repeat_emit_calcBlockSizeSmall: + ADDQ $0x01, AX -gen_emit_copy_end_snappy: - MOVQ BX, ret+40(FP) - RET +memmove_repeat_emit_calcBlockSizeSmall: + LEAQ (AX)(DI*1), AX + JMP emit_literal_done_repeat_emit_calcBlockSizeSmall -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX +memmove_long_repeat_emit_calcBlockSizeSmall: + LEAQ (AX)(DI*1), AX + +emit_literal_done_repeat_emit_calcBlockSizeSmall: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+8(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX // matchLen - XORL SI, SI - CMPL DX, $0x08 - JL matchlen_match4_standalone + XORL R10, R10 + CMPL DI, $0x08 + JL matchlen_match4_repeat_extend_calcBlockSizeSmall -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone +matchlen_loopback_repeat_extend_calcBlockSizeSmall: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_calcBlockSizeSmall #ifdef GOAMD64_v3 - TZCNTQ BX, BX + TZCNTQ R9, R9 #else - BSFQ BX, BX + BSFQ R9, R9 #endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_calcBlockSizeSmall -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JGE matchlen_loopback_standalone - JZ gen_match_len_end +matchlen_loop_repeat_extend_calcBlockSizeSmall: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JGE matchlen_loopback_repeat_extend_calcBlockSizeSmall + JZ repeat_extend_forward_end_calcBlockSizeSmall -matchlen_match4_standalone: - CMPL DX, $0x04 - JL matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - SUBL $0x04, DX - LEAL 4(SI), SI +matchlen_match4_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x04 + JL matchlen_match2_repeat_extend_calcBlockSizeSmall + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_calcBlockSizeSmall + SUBL $0x04, DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x02 + JL matchlen_match1_repeat_extend_calcBlockSizeSmall + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_calcBlockSizeSmall + SUBL $0x02, DI + LEAL 2(R10), R10 + +matchlen_match1_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x01 + JL repeat_extend_forward_end_calcBlockSizeSmall + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_calcBlockSizeSmall + LEAL 1(R10), R10 + +repeat_extend_forward_end_calcBlockSizeSmall: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_calcBlockSizeSmall: + CMPL BX, $0x40 + JLE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall + +two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall: + MOVL BX, SI + SHLL $0x02, SI + CMPL BX, $0x0c + JGE 
emit_copy_three_repeat_as_copy_calcBlockSizeSmall + ADDQ $0x02, AX + JMP repeat_end_emit_calcBlockSizeSmall + +emit_copy_three_repeat_as_copy_calcBlockSizeSmall: + ADDQ $0x03, AX + +repeat_end_emit_calcBlockSizeSmall: + MOVL CX, 12(SP) + JMP search_loop_calcBlockSizeSmall + +no_repeat_found_calcBlockSizeSmall: + CMPL (DX)(BX*1), SI + JEQ candidate_match_calcBlockSizeSmall + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_calcBlockSizeSmall + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_calcBlockSizeSmall + MOVL 20(SP), CX + JMP search_loop_calcBlockSizeSmall + +candidate3_match_calcBlockSizeSmall: + ADDL $0x02, CX + JMP candidate_match_calcBlockSizeSmall + +candidate2_match_calcBlockSizeSmall: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_calcBlockSizeSmall: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_calcBlockSizeSmall + +match_extend_back_loop_calcBlockSizeSmall: + CMPL CX, SI + JLE match_extend_back_end_calcBlockSizeSmall + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_calcBlockSizeSmall + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_calcBlockSizeSmall + JMP match_extend_back_loop_calcBlockSizeSmall + +match_extend_back_end_calcBlockSizeSmall: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JL match_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +match_dst_size_check_calcBlockSizeSmall: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_calcBlockSizeSmall + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JLT one_byte_match_emit_calcBlockSizeSmall + CMPL SI, $0x00000100 + JLT two_bytes_match_emit_calcBlockSizeSmall + ADDQ $0x03, AX + JMP memmove_long_match_emit_calcBlockSizeSmall + +two_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_match_emit_calcBlockSizeSmall + JMP memmove_long_match_emit_calcBlockSizeSmall + +one_byte_match_emit_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_match_emit_calcBlockSizeSmall: + LEAQ (AX)(R8*1), AX + JMP emit_literal_done_match_emit_calcBlockSizeSmall + +memmove_long_match_emit_calcBlockSizeSmall: + LEAQ (AX)(R8*1), AX + +emit_literal_done_match_emit_calcBlockSizeSmall: +match_nolit_loop_calcBlockSizeSmall: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+8(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JL matchlen_match4_match_nolit_calcBlockSizeSmall + +matchlen_loopback_match_nolit_calcBlockSizeSmall: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_calcBlockSizeSmall + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_loop_match_nolit_calcBlockSizeSmall: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JGE matchlen_loopback_match_nolit_calcBlockSizeSmall + JZ match_nolit_end_calcBlockSizeSmall + +matchlen_match4_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x04 + JL matchlen_match2_match_nolit_calcBlockSizeSmall + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_calcBlockSizeSmall + SUBL $0x04, SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x02 + JL 
matchlen_match1_match_nolit_calcBlockSizeSmall + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_calcBlockSizeSmall + SUBL $0x02, SI + LEAL 2(R9), R9 + +matchlen_match1_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x01 + JL match_nolit_end_calcBlockSizeSmall + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_calcBlockSizeSmall + LEAL 1(R9), R9 + +match_nolit_end_calcBlockSizeSmall: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_calcBlockSizeSmall: + CMPL R9, $0x40 + JLE two_byte_offset_short_match_nolit_calcBlockSizeSmall + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_calcBlockSizeSmall + +two_byte_offset_short_match_nolit_calcBlockSizeSmall: + MOVL R9, BX + SHLL $0x02, BX + CMPL R9, $0x0c + JGE emit_copy_three_match_nolit_calcBlockSizeSmall + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_calcBlockSizeSmall + +emit_copy_three_match_nolit_calcBlockSizeSmall: + ADDQ $0x03, AX + +match_nolit_emitcopy_end_calcBlockSizeSmall: + CMPL CX, 8(SP) + JGE emit_remainder_calcBlockSizeSmall + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JL match_nolit_dst_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +match_nolit_dst_ok_calcBlockSizeSmall: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x37, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x37, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_calcBlockSizeSmall + INCL CX + JMP search_loop_calcBlockSizeSmall + +emit_remainder_calcBlockSizeSmall: + MOVQ src_len+8(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JL emit_remainder_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +emit_remainder_ok_calcBlockSizeSmall: + MOVQ src_len+8(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), CX + CMPL CX, $0x3c + JLT one_byte_emit_remainder_calcBlockSizeSmall + CMPL CX, $0x00000100 + JLT two_bytes_emit_remainder_calcBlockSizeSmall + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +two_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL CX, $0x40 + JL memmove_emit_remainder_calcBlockSizeSmall + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +one_byte_emit_remainder_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_emit_remainder_calcBlockSizeSmall: + LEAQ (AX)(SI*1), AX + JMP emit_literal_done_emit_remainder_calcBlockSizeSmall + +memmove_long_emit_remainder_calcBlockSizeSmall: + LEAQ (AX)(SI*1), AX + +emit_literal_done_emit_remainder_calcBlockSizeSmall: + MOVQ AX, ret+24(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JLT one_byte_standalone + CMPL SI, $0x00000100 + JLT two_bytes_standalone + CMPL SI, $0x00010000 + JLT three_bytes_standalone + CMPL SI, $0x01000000 + JLT four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + +four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + 
JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JL memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone + 
+cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JLT repeat_three_standalone + CMPL DX, $0x00010100 + JLT repeat_four_standalone + CMPL DX, $0x0100ffff + JLT repeat_five_standalone + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JL two_byte_offset_standalone + CMPL DX, $0x40 + JLE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JL four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JLE two_byte_offset_short_standalone + CMPL CX, $0x00000800 + JAE long_offset_short_standalone + MOVL $0x00000001, SI + LEAL 16(SI), 
SI + MOVB CL, 1(AX) + MOVL CX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + SUBL $0x08, DX + + // emitRepeat + LEAL -4(DX), DX + JMP cant_repeat_two_offset_standalone_emit_copy_short_2b + +emit_repeat_again_standalone_emit_copy_short_2b: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone_emit_copy_short_2b + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy_short_2b + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy_short_2b + +cant_repeat_two_offset_standalone_emit_copy_short_2b: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy_short_2b + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy_short_2b + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy_short_2b + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short_2b + +repeat_five_standalone_emit_copy_short_2b: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short_2b: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short_2b: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short_2b: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +long_offset_short_standalone: + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JLE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JGE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JLT repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JLT repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 + JLT repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JLT repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + 
MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +two_byte_offset_short_standalone: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JGE emit_copy_three_standalone + CMPL CX, $0x00000800 + JGE emit_copy_three_standalone + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JL two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JLE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JL four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JLE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JGE emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JGE emit_copy_three_standalone_snappy + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JL matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX + +#else + BSFQ BX, BX + +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JGE matchlen_loopback_standalone + JZ gen_match_len_end + +matchlen_match4_standalone: + CMPL DX, $0x04 + JL matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + SUBL $0x04, DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JL matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + SUBL $0x02, DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JL gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET + +// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ 
dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + XORQ DI, DI + +lz4_s2_loop: + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ AX, CX + JAE lz4_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4_s2_ll_end + +lz4_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4_s2_ll_loop + +lz4_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x04, R10 + CMPQ R8, BX + JAE lz4_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JLT one_byte_lz4_s2 + CMPL R11, $0x00000100 + JLT two_bytes_lz4_s2 + CMPL R11, $0x00010000 + JLT three_bytes_lz4_s2 + CMPL R11, $0x01000000 + JLT four_bytes_lz4_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_s2 + +four_bytes_lz4_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_s2 + +three_bytes_lz4_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_s2 + +two_bytes_lz4_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JL memmove_lz4_s2 + JMP memmove_long_lz4_s2 + +one_byte_lz4_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_lz4_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4_s2_memmove_move_33through64 + +emit_lit_memmove_lz4_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4_s2: + MOVQ R11, AX + JMP lz4_s2_lits_emit_done + +memmove_long_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4_s2large_big_loop_back + +emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4_s2_lits_emit_done: + MOVQ R8, DX + 
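+	// Literals for this sequence are emitted; if input remains, a match
+	// follows, otherwise the final token must carry the minimum match length.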
+lz4_s2_lits_done: + CMPQ DX, BX + JNE lz4_s2_match + CMPQ R10, $0x04 + JEQ lz4_s2_done + JMP lz4_s2_corrupt + +lz4_s2_match: + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4_s2_corrupt + CMPQ R9, SI + JA lz4_s2_corrupt + CMPQ R10, $0x13 + JNE lz4_s2_ml_done + +lz4_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ R8, $0xff + JEQ lz4_s2_ml_loop + +lz4_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JLE repeat_two_lz4_s2 + CMPL R8, $0x0c + JGE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JLT repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JLT repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JLT repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JLT repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +lz4_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JLE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JLE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JGE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JLT repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JLT repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JLT repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JLT repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 
1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JLE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JLT repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JLT repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JLT repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JGE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JGE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +lz4_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + XORQ DI, DI + +lz4s_s2_loop: + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ AX, CX + JAE lz4s_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4s_s2_ll_end + +lz4s_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4s_s2_ll_loop + +lz4s_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x03, R10 + CMPQ R8, BX + JAE lz4s_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4s_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4s_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JLT one_byte_lz4s_s2 + CMPL R11, $0x00000100 + JLT two_bytes_lz4s_s2 + CMPL R11, $0x00010000 + JLT three_bytes_lz4s_s2 + CMPL R11, $0x01000000 + JLT four_bytes_lz4s_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP 
memmove_long_lz4s_s2 + +four_bytes_lz4s_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_s2 + +three_bytes_lz4s_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_s2 + +two_bytes_lz4s_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JL memmove_lz4s_s2 + JMP memmove_long_lz4s_s2 + +one_byte_lz4s_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JLE emit_lit_memmove_lz4s_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64 + +emit_lit_memmove_lz4s_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4s_s2: + MOVQ R11, AX + JMP lz4s_s2_lits_emit_done + +memmove_long_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4s_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back + +emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4s_s2_lits_emit_done: + MOVQ R8, DX + +lz4s_s2_lits_done: + CMPQ DX, BX + JNE lz4s_s2_match + CMPQ R10, $0x03 + JEQ lz4s_s2_done + JMP lz4s_s2_corrupt + +lz4s_s2_match: + CMPQ R10, $0x03 + JEQ lz4s_s2_loop + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4s_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4s_s2_corrupt + CMPQ R9, SI + JA lz4s_s2_corrupt + CMPQ R10, $0x12 + JNE lz4s_s2_ml_done + +lz4s_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ R8, $0xff + JEQ lz4s_s2_ml_loop + +lz4s_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4s_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JLE repeat_two_lz4_s2 + CMPL R8, $0x0c + JGE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JLT repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JLT repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JLT 
repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JLT repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +lz4s_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JLE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JLE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JGE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JLT repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JLT repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JLT repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JLT repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX -matchlen_match2_standalone: - CMPL DX, $0x02 - JL matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - SUBL $0x02, DX - LEAL 2(SI), SI + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JLE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JGE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JLT repeat_two_offset_lz4_s2_emit_copy_short -matchlen_match1_standalone: - CMPL DX, $0x01 - JL gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - LEAL 1(SI), SI 
+cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JLT repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JLT repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JLT repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short -gen_match_len_end: - MOVQ SI, ret+48(FP) +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JGE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JGE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +lz4s_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + +lz4_snappy_loop: + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ AX, CX + JAE lz4_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4_snappy_ll_end + +lz4_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4_snappy_ll_loop + +lz4_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x04, R9 + CMPQ DI, BX + JAE lz4_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JLT one_byte_lz4_snappy + CMPL R10, $0x00000100 + JLT two_bytes_lz4_snappy + CMPL R10, $0x00010000 + JLT three_bytes_lz4_snappy + CMPL R10, $0x01000000 + JLT four_bytes_lz4_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_snappy + +four_bytes_lz4_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_snappy + +three_bytes_lz4_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_snappy + +two_bytes_lz4_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JL memmove_lz4_snappy + JMP 
memmove_long_lz4_snappy + +one_byte_lz4_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_lz4_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4_snappy: + MOVQ R10, AX + JMP lz4_snappy_lits_emit_done + +memmove_long_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4_snappy_lits_emit_done: + MOVQ DI, DX + +lz4_snappy_lits_done: + CMPQ DX, BX + JNE lz4_snappy_match + CMPQ R9, $0x04 + JEQ lz4_snappy_done + JMP lz4_snappy_corrupt + +lz4_snappy_match: + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4_snappy_corrupt + CMPQ R8, SI + JA lz4_snappy_corrupt + CMPQ R9, $0x13 + JNE lz4_snappy_ml_done + +lz4_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4_snappy_ml_loop + +lz4_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JLE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JGE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JGE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4_snappy_loop + +lz4_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, 
uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + +lz4s_snappy_loop: + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ AX, CX + JAE lz4s_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4s_snappy_ll_end + +lz4s_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4s_snappy_ll_loop + +lz4s_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x03, R9 + CMPQ DI, BX + JAE lz4s_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4s_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4s_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JLT one_byte_lz4s_snappy + CMPL R10, $0x00000100 + JLT two_bytes_lz4s_snappy + CMPL R10, $0x00010000 + JLT three_bytes_lz4s_snappy + CMPL R10, $0x01000000 + JLT four_bytes_lz4s_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_snappy + +four_bytes_lz4s_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_snappy + +three_bytes_lz4s_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_snappy + +two_bytes_lz4s_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JL memmove_lz4s_snappy + JMP memmove_long_lz4s_snappy + +one_byte_lz4s_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JLE emit_lit_memmove_lz4s_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4s_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4s_snappy: + MOVQ R10, AX + JMP lz4s_snappy_lits_emit_done + +memmove_long_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + 
+emit_lit_memmove_long_lz4s_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4s_snappy_lits_emit_done: + MOVQ DI, DX + +lz4s_snappy_lits_done: + CMPQ DX, BX + JNE lz4s_snappy_match + CMPQ R9, $0x03 + JEQ lz4s_snappy_done + JMP lz4s_snappy_corrupt + +lz4s_snappy_match: + CMPQ R9, $0x03 + JEQ lz4s_snappy_loop + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4s_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4s_snappy_corrupt + CMPQ R8, SI + JA lz4s_snappy_corrupt + CMPQ R9, $0x12 + JNE lz4s_snappy_ml_done + +lz4s_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4s_snappy_ml_loop + +lz4s_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JLE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4s_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JGE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JGE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4s_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4s_snappy_loop + +lz4s_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) RET diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go new file mode 100644 index 000000000..46ed908e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go @@ -0,0 +1,585 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// LZ4Converter provides conversion from LZ4 blocks as defined here: +// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md +type LZ4Converter struct { +} + +// ErrDstTooSmall is returned when provided destination is too small. +var ErrDstTooSmall = errors.New("s2: destination too small") + +// ConvertBlock will convert an LZ4 block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
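+//
+// A hypothetical usage sketch (identifiers like lz4Block and maxDecodedSize
+// are illustrative, not part of this package); dst is sized with
+// MaxEncodedLen, assuming the decompressed size of the LZ4 block is known:
+//
+//	conv := LZ4Converter{}
+//	dst := make([]byte, 0, MaxEncodedLen(maxDecodedSize))
+//	s2Block, uncompressed, err := conv.ConvertBlock(dst, lz4Block)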
+func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const 
maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4 block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
+func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat16(dst []byte, offset uint16, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat16(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint16 +// 4 <= length && length <= math.MaxUint32 +func emitCopy16(dst []byte, offset uint16, length int) int { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat16(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralGo(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go new file mode 100644 index 000000000..000f39719 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go @@ -0,0 +1,467 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "fmt" +) + +// LZ4sConverter provides conversion from LZ4s. +// (Intel modified LZ4 Blocks) +// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf +// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format. +// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData. +// The LZ4s block returned by the Intel® QAT hardware can be used by an external +// software post-processing to generate other compressed data formats. +// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses +// the same high-level formatting as LZ4 block format with the following encoding changes: +// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte. +// ONLY "Min match of 4 bytes" is supported. +type LZ4sConverter struct { +} + +// ConvertBlock will convert an LZ4s block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
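+//
+// Usage mirrors LZ4Converter.ConvertBlock; a hypothetical sketch, with
+// dst and lz4sBlock as illustrative names:
+//
+//	s2Block, uncompressed, err := (&LZ4sConverter{}).ConvertBlock(dst, lz4sBlock)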
+func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. 
+ continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4s block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. + continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. 
+				dst[d+2] = uint8(offset >> 8)
+				dst[d+1] = uint8(offset)
+				dst[d+0] = 63<<2 | tagCopy2
+				length -= 64
+				d += 3
+				continue
+			}
+			if length >= 12 || offset >= 2048 || length < 4 {
+				// Emit the remaining copy, encoded as 3 bytes.
+				dst[d+2] = uint8(offset >> 8)
+				dst[d+1] = uint8(offset)
+				dst[d+0] = uint8(length-1)<<2 | tagCopy2
+				d += 3
+				break
+			}
+			// Emit the remaining copy, encoded as 2 bytes.
+			dst[d+1] = uint8(offset)
+			dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+			d += 2
+			break
+		}
+		uncompressed += ml
+		if d > dLimit {
+			return nil, 0, ErrDstTooSmall
+		}
+	}
+
+	return dst[:d], uncompressed, nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
new file mode 100644
index 000000000..2263853fc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -0,0 +1,4 @@
+module github.com/klauspost/compress
+
+go 1.16
+
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 000000000..65b38abed
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,441 @@
+# zstd
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios.
+It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder.
+A high-performance compression algorithm is implemented, currently focused on speed.
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+For now, high speed (fastest) and medium-fast (default) compressors have been implemented.
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
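+
+The level names above map onto the package's `EncoderLevel` constants
+(`SpeedFastest`, `SpeedDefault`, `SpeedBetterCompression`, `SpeedBestCompression`).
+As a minimal sketch, a "best"-level writer can be created like this, where `out`
+is any `io.Writer`:
+
+```Go
+enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
+```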
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+For smaller encodes, using the `EncodeAll` function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+ enc, err := zstd.NewWriter(out)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(enc, in)
+ if err != nil {
+ enc.Close()
+ return err
+ }
+ return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and probably never will) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
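+
+As a minimal sketch of this concatenation property (the helper and variable names are illustrative):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+var encoder, _ = zstd.NewWriter(nil)
+var decoder, _ = zstd.NewReader(nil)
+
+// RoundTrip encodes a and b as two independent blocks, then decodes
+// the concatenation, which yields the combined input.
+func RoundTrip(a, b []byte) ([]byte, error) {
+    blocks := encoder.EncodeAll(a, nil)
+    blocks = encoder.EncodeAll(b, blocks)
+    return decoder.DecodeAll(blocks, nil)
+}
+```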
+
+Especially when encoding blocks, you should take care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+ return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd`/`gzkp` are stdlib gzip and this library, respectively.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+This package:
+file out level insize outsize millis mb/s
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
+
+cgo zstd:
+silesia.tar zstd 1 211947520 73605392 543 371.56
+silesia.tar zstd 3 211947520 66793289 864 233.68
+silesia.tar zstd 6 211947520 62916450 1913 105.66
+silesia.tar zstd 9 211947520 60212393 5063 39.92
+
+gzip, stdlib/this package:
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file out level insize outsize millis mb/s
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
+
+gob-stream zstd 1 1911399616 249810424 2637 691.26
+gob-stream zstd 3 1911399616 208192146 3490 522.31
+gob-stream zstd 6 1911399616 193632038 6687 272.56
+gob-stream zstd 9 1911399616 177620386 16175 112.70
+
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
+
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file out level insize outsize millis mb/s
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
+
+enwik9 zstd 1 1000000000 358072021 3110 306.65
+enwik9 zstd 3 1000000000 313734672 4784 199.35
+enwik9 zstd 6 1000000000 295138875 10290 92.68
+enwik9 zstd 9 1000000000 278348700 28549 33.40
+
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
+
+Highly compressible JSON file.
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
+
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
+
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
+
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+with fuzzing kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both of them are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+ d, err := zstd.NewReader(in)
+ if err != nil {
+ return err
+ }
+ defer d.Close()
+
+ // Copy content...
+ _, err = io.Copy(out, d)
+ return err
+}
+```
+
+When running with default settings, it is important to use the "Close" function when you no longer need the Reader,
+in order to stop its running goroutines.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is being requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+ return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, so be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes and the rest are smaller inputs.
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
+```
+
+This reflects the performance around May 2022, but this may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and a decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
+use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
+
+It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance will allow reusing some resources.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
+how to compress and decompress files inside zip archives.
+
+# Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests, and for performance enhancements include benchmarks.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 000000000..97299d499
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,140 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64 // Maybe use [16]byte, but shifting is awkward.
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.bitsRead += 8 - uint8(highBits(uint32(v)))
+ return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+ if n == 0 /*|| b.bitsRead >= 64 */ {
+ return 0
+ }
+ return int(b.get32BitsFast(n))
+}
+
+// get32BitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
+ const regMask = 64 - 1
+ v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+ b.bitsRead += n
+ return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+ // 2 bounds checks.
+ v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 000000000..78b3c61be --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,113 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 000000000..5f272d87f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). 
+ maxBlockSize = (1 << 21) - 1
+
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
+
+ // We support slightly less than the reference decoder to be able to
+ // use ints on 32 bit archs.
+ maxOffsetBits = 30
+)
+
+var (
+ huffDecoderPool = sync.Pool{New: func() interface{} {
+ return &huff0.Scratch{}
+ }}
+
+ fseDecoderPool = sync.Pool{New: func() interface{} {
+ return &fseDecoder{}
+ }}
+)
+
+type blockDec struct {
+ // Raw source data of the block.
+ data []byte
+ dataStorage []byte
+
+ // Destination of the decoded data.
+ dst []byte
+
+ // Buffer for literals data.
+ literalBuf []byte
+
+ // Window size of the block.
+ WindowSize uint64
+
+ err error
+
+ // Check against this crc, if hasCRC is true.
+ checkCRC uint32
+ hasCRC bool
+
+ // Frame to use for singlethreaded decoding.
+ // Should not be used by the decoder itself since parent may be another frame.
+ localFrame *frameDec
+
+ sequence []seqVals
+
+ async struct {
+ newHist *history
+ literals []byte
+ seqData []byte
+ seqSize int // Size of uncompressed sequences
+ fcs uint64
+ }
+
+ // Block is RLE, this is the size.
+ RLESize uint32
+
+ Type blockType
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Use less memory
+ lowMem bool
+}
+
+func (b *blockDec) String() string {
+ if b == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+ b := blockDec{
+ lowMem: lowMem,
+ }
+ return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+ b.WindowSize = windowSize
+ tmp, err := br.readSmall(3)
+ if err != nil {
+ println("Reading block header:", err)
+ return err
+ }
+ bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+ b.Last = bh&1 != 0
+ b.Type = blockType((bh >> 1) & 3)
+ // find size.
+ cSize := int(bh >> 3)
+ maxSize := maxCompressedBlockSizeAlloc
+ switch b.Type {
+ case blockTypeReserved:
+ return ErrReservedBlockType
+ case blockTypeRLE:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+ b.RLESize = uint32(cSize)
+ if b.lowMem {
+ maxSize = cSize
+ }
+ cSize = 1
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("Data size on stream:", cSize)
+ }
+ b.RLESize = 0
+ maxSize = maxCompressedBlockSizeAlloc
+ if windowSize < maxCompressedBlockSize && b.lowMem {
+ maxSize = int(windowSize) + compressedBlockOverAlloc
+ }
+ if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+ if debugDecoder {
+ printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrCompressedSizeTooBig
+ }
+ // Empty compressed blocks must at least be 2 bytes
+ // for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
+ case blockTypeRaw:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+
+ b.RLESize = 0
+ // We do not need a destination for raw blocks.
+ maxSize = -1
+ default:
+ panic("Invalid block type")
+ }
+
+ // Read block data.
+ if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+ // byteBuf doesn't need a destination buffer.
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + 
} + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 000000000..12e8f6f0b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... 
+ // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... 
+ if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. 
+		outBits := ofB.outBits & 31
+		extraBits := uint64(s.offset & bitMask32[outBits])
+		extraBitsN := outBits
+
+		mlB := mlTT[s.mlCode]
+		//ml.encode(mlB)
+		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+		wr.addBits16NC(ml.state, uint8(nbBitsOut))
+		ml.state = ml.stateTable[dstState]
+
+		outBits = mlB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		llB := llTT[s.llCode]
+		//ll.encode(llB)
+		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+		wr.addBits16NC(ll.state, uint8(nbBitsOut))
+		ll.state = ll.stateTable[dstState]
+
+		outBits = llB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		wr.flush32()
+		wr.addBits64NC(extraBits, extraBitsN)
+
+		if debugSequences {
+			println("Encoded seq", seq, s)
+		}
+
+		seq--
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debugEncoder {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()
+	ofH := b.coders.ofEnc.Histogram()
+	mlH := b.coders.mlEnc.Histogram()
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i := range b.sequences {
+		seq := &b.sequences[i]
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 000000000..01a01e486
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 000000000..512ffe5b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
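All the generated String methods above share stringer's compact layout: one concatenated name string plus an offset array, so a lookup is two slice bounds instead of a switch. A hand-written sketch of the same pattern for a hypothetical type:

```go
package main

import (
	"fmt"
	"strconv"
)

type color int

const (
	red color = iota
	green
	blue
)

// Concatenated names and their start/end offsets, as stringer would emit them.
const colorName = "redgreenblue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c color) String() string {
	if c < 0 || int(c) >= len(colorIndex)-1 {
		return "color(" + strconv.FormatInt(int64(c), 10) + ")"
	}
	return colorName[colorIndex[c]:colorIndex[c+1]]
}

func main() {
	fmt.Println(green, color(7)) // green color(7)
}
```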
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 000000000..0e59a242d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
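The two byteBuffer implementations above are worth contrasting: byteBuf advances through an in-memory slice by re-slicing itself, while readerWrapper fills a fixed 8-byte scratch array from an io.Reader. The slice-advance idiom in isolation (sliceBuf is an illustrative stand-in, not the package type):

```go
package main

import (
	"fmt"
	"io"
)

// sliceBuf consumes from an in-memory slice by re-slicing itself,
// mirroring the byteBuf technique above.
type sliceBuf []byte

func (b *sliceBuf) readSmall(n int) ([]byte, error) {
	bb := *b
	if len(bb) < n {
		return nil, io.ErrUnexpectedEOF
	}
	r := bb[:n]
	*b = bb[n:] // advance: the backing array is shared, no copy is made
	return r, nil
}

func main() {
	buf := sliceBuf([]byte{1, 2, 3, 4, 5})
	head, _ := buf.readSmall(2)
	fmt.Println(head, []byte(buf)) // [1 2] [3 4 5]
}
```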
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..f6a240970 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,229 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
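byteReader assembles little-endian integers by hand so the bounds checks sit where the hot paths want them; for a one-off read this is interchangeable with encoding/binary, which the decoder uses elsewhere. A quick equivalence check:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	// Manual little-endian assembly, as in byteReader.Uint32 above.
	manual := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	std := binary.LittleEndian.Uint32(b)
	fmt.Printf("0x%08x == 0x%08x: %v\n", manual, std, manual == std)
}
```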
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
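The window descriptor decoded above packs an exponent into the top five bits and a mantissa into the low three: the window is 2^(10+exponent) plus mantissa eighths of that base. The same arithmetic in isolation:

```go
package main

import "fmt"

// windowSize expands a zstd Window_Descriptor byte,
// mirroring the computation in Header.Decode above.
func windowSize(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSize(0x00)) // 1024, the minimum window
	fmt.Println(windowSize(0x0C)) // 3072: 2^11 plus 4/8 of it
}
```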
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 000000000..7113e69ee --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,953 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. 
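Header and its Decode method above allow sniffing a frame without decompressing anything. As a usage sketch, here is a hand-built skippable frame (magic 0x50-0x5F, 0x2A, 0x4D, 0x18, with the low nibble of the first byte as the user ID, followed by a little-endian payload size); the byte values are constructed for illustration:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Skippable frame header: magic with user ID 3, then 4-byte LE payload size 8.
	frame := []byte{0x53, 0x2A, 0x4D, 0x18, 0x08, 0x00, 0x00, 0x00}
	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		panic(err)
	}
	fmt.Println(h.Skippable, h.SkippableID, h.SkippableSize) // true 3 8
}
```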
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.crc = xxhash.New()
+	d.current.flushed = true
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
+
+	// Transfer option dicts.
+	d.dicts = make(map[uint32]*dict, len(d.o.dicts))
+	for _, dc := range d.o.dicts {
+		d.dicts[dc.id] = dc
+	}
+	d.o.dicts = nil
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes read and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, d.current.err
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debugDecoder {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debugDecoder {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+
+	d.drainOutput()
+
+	d.syncStream.br.r = nil
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+		if len(d.current.b) > 0 {
+			d.current.b = d.current.b[:0]
+		}
+		d.current.flushed = true
+		return nil
+	}
+
+	// If bytes buffer and below the decode-buffer threshold, do sync decoding anyway.
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
+		bb2 := bb
+		if debugDecoder {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb2.Bytes()
+		var dst []byte
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
+		}
+
+		dst, err := d.DecodeAll(b, dst)
+		if err == nil {
+			err = io.EOF
+		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debugDecoder {
+			println("sync decode to", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+	// Remove current block.
+ d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
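As the DecodeAll documentation above notes, output is appended to dst, so a caller that knows the decompressed size can pass a pre-sized buffer and avoid reallocation, and a single stored Decoder can serve many goroutines. A hedged usage sketch; expectedSize is a caller-side assumption, not something the API provides:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// A nil reader is fine when the decoder is only used via DecodeAll.
var decoder, _ = zstd.NewReader(nil)

// decompress appends the decoded payload to a buffer sized up front.
func decompress(compressed []byte, expectedSize int) ([]byte, error) {
	return decoder.DecodeAll(compressed, make([]byte, 0, expectedSize))
}

func main() {
	out, err := decompress([]byte{ /* zstd frame bytes */ }, 1024)
	fmt.Println(len(out), err)
}
```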
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
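The CRC comparison above implements the zstd content checksum: the low 32 bits of the XXH64 digest of the decoded payload. A standalone sketch; it assumes the external github.com/cespare/xxhash/v2 module, whereas this package vendors its own internal xxhash copy:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	decoded := []byte("hello zstd")
	sum := xxhash.Sum64(decoded)
	checksum := uint32(sum) // the frame checksum keeps only the low 32 bits
	fmt.Printf("xxh64=%016x checksum=%08x\n", sum, checksum)
}
```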
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 000000000..07a90dd7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding blocks with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whichever is lower.
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n < 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		if n == 0 {
+			o.concurrent = runtime.GOMAXPROCS(0)
+		} else {
+			o.concurrent = n
+		}
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+//
+// Each slice in dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, d)
+		}
+		return nil
+	}
+}
+
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+	return func(o *decoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+		return nil
+	}
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
+// This allows rejecting packets that will cause big memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used.
+// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+	return func(o *decoderOptions) error {
+		if size < MinWindowSize {
+			return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
+		}
+		if size > (1<<41)+7*(1<<38) {
+			return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+		}
+		o.maxWindowSize = size
+		return nil
+	}
+}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
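The options above compose at construction time; a typical hardened setup for untrusted input bounds memory, window size and concurrency together. A sketch with plausible limits (the numbers are arbitrary examples, not recommendations):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderConcurrency(4),     // at most 4 concurrent block decoders
		zstd.WithDecoderMaxMemory(256<<20), // refuse output above 256 MiB
		zstd.WithDecoderMaxWindow(64<<20),  // reject frames with windows above 64 MiB
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	fmt.Println("decoder ready")
}
```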
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 000000000..ca0951452 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,161 @@ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..e008b9929 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,172 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) 
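loadDict above follows the documented dictionary layout: the 4-byte magic 0xEC30A437 (stored little endian as 37 A4 30 EC), a 4-byte little-endian dictionary ID, the entropy tables, three repeat offsets, and the raw content. A sketch that validates just the fixed prefix; dictID is an illustrative helper, not the package API:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var dictMagic = []byte{0x37, 0xA4, 0x30, 0xEC}

// dictID extracts the dictionary ID, mirroring the first checks in loadDict.
func dictID(b []byte) (uint32, error) {
	if len(b) < 8 || string(b[:4]) != string(dictMagic) {
		return 0, errors.New("not a zstd dictionary")
	}
	id := binary.LittleEndian.Uint32(b[4:8])
	if id == 0 {
		return 0, errors.New("dictionaries cannot have ID 0")
	}
	return id, nil
}

func main() {
	blob := append(append([]byte{}, dictMagic...), 0x01, 0x00, 0x00, 0x00)
	fmt.Println(dictID(blob)) // 1 <nil>
}
```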
+ return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+ if cap(e.hist) >= n {
+ return
+ }
+ l := e.maxMatchOff
+ if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+ l += maxCompressedBlockSize
+ } else {
+ l += e.maxMatchOff
+ }
+ // Make it at least 1MB.
+ if l < 1<<20 && !e.lowMem {
+ l = 1 << 20
+ }
+ // Make it at least the requested size.
+ if l < int32(n) {
+ l = int32(n)
+ }
+ e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+ if debugAsserts {
+ if s < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if t < 0 {
+ err := fmt.Sprintf("t (%d) < 0", t)
+ panic(err)
+ }
+ if s-t > e.maxMatchOff {
+ err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+ panic(err)
+ }
+ if len(src)-int(s) > maxCompressedBlockSize {
+ panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ }
+ }
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+ if e.blk == nil {
+ e.blk = &blockEnc{lowMem: e.lowMem}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ if d != nil {
+ low := e.lowMem
+ if singleBlock {
+ e.lowMem = true
+ }
+ e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
+ e.lowMem = low
+ }
+
+ // We offset current position so everything will be out of reach.
+ // If above reset line, history will be purged.
+ if e.cur < e.bufferReset {
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+ if d != nil {
+ // Set offsets (currently not used)
+ for i, off := range d.offsets {
+ e.blk.recentOffsets[i] = uint32(off)
+ e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+ }
+ // Transfer litEnc.
+ e.blk.dictLitEnc = d.litEnc
+ e.hist = append(e.hist, d.content...)
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 000000000..07f657d36
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,555 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/klauspost/compress"
+)
+
+const (
+ bestLongTableBits = 22 // Bits used in the long match table
+ bestLongTableSize = 1 << bestLongTableBits // Size of the table
+ bestLongLen = 8 // Bytes used for table hash
+
+ // Note: Increasing the short table bits or making the hash shorter
+ // can actually lead to compression degradation since it will 'steal' more from the
+ // long match table and match offsets are quite big.
+ // This greatly depends on the type of input.
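+ // With 22 and 18 table bits, the two prevEntry tables hold
+ // (1<<22 + 1<<18) entries of 8 bytes each, roughly 34 MiB per encoder.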
+ bestShortTableBits = 18 // Bits used in the short match table
+ bestShortTableSize = 1 << bestShortTableBits // Size of the table
+ bestShortLen = 4 // Bytes used for table hash
+
+)
+
+type match struct {
+ offset int32
+ s int32
+ length int32
+ rep int32
+ est int32
+}
+
+const highScore = 25000
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+ mlc := mlCode(uint32(m.length - zstdMinMatch))
+ var ofc uint8
+ if m.rep < 0 {
+ ofc = ofCode(uint32(m.s-m.offset) + 3)
+ } else {
+ ofc = ofCode(uint32(m.rep))
+ }
+ // Look up predefined cost tables for the offset and match length codes.
+ ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+ // Add cost of match encoding...
+ m.est = int32(ofTT.outBits + mlTT.outBits)
+ m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+ // Subtract savings compared to literal encoding...
+ m.est -= (m.length * bitsPerByte) >> 10
+ if m.est > 0 {
+ // The match is unlikely to be a gain; discard it.
+ m.length = 0
+ m.est = highScore
+ }
+}
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+ fastBase
+ table [bestShortTableSize]prevEntry
+ longTable [bestLongTableSize]prevEntry
+ dictTable []prevEntry
+ dictLongTable []prevEntry
+}
+
+// Encode improves compression at the cost of speed by scoring multiple match candidates.
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (4)
+ inputMargin = 8 + 4
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [bestShortTableSize]prevEntry{}
+ e.longTable = [bestLongTableSize]prevEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ v2 := e.table[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.table[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Use this to estimate literal cost.
+ // Scaled by 10 bits.
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
+ // Huffman can never go < 1 bit/byte
+ if bitsPerByte < 1024 {
+ bitsPerByte = 1024
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ const kSearchStrength = 10
+
+ // nextEmit is where in src the next emitLiteral should start from.
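+ // Bytes in src[nextEmit:s] that no sequence claims are flushed as literals.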
+ nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... 
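+ // (Kept for experimentation; this extra probe was usually a loss.)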
+ improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
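+ // The forward extent of the match is already final; only backwards
+ // extension over equal bytes can still grow it.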
+ l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. 
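+// For this level that simply means pre-sizing the history buffer and
+// delegating to Encode.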
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..8582f31a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1242 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
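+ // With 19 long-table bits and 13 short-table bits, the tables take
+ // 1<<19 prevEntry entries (4 MiB) plus 1<<13 tableEntry entries (64 KiB).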
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
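+ // litLen records how many of the appended bytes belong to this sequence.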
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
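+ // Re-index the positions the match jumped over, alternating between
+ // the long and short tables so later searches can still find them.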
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
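+ // Only adopt it if it is longer than the match we already have.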
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
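+ // (Literal bytes accumulate in blk.literals and are entropy coded
+ // once the block is sealed.)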
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
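+ // Entries older than one window are dropped; the rest are rebased so
+ // e.cur can restart at maxMatchOff without invalidating offsets.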
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
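+ // Same re-indexing as the plain encoder, but each touched shard is
+ // marked dirty so Reset can restore it from the dictionary tables.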
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
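+ // The newest table slot missed, but its chained previous entry matches.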
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
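+ // matched covers the forward direction; extend backwards while the
+ // preceding bytes agree.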
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
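+ // Trailing bytes after the last sequence are carried as extra literals.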
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("betterFastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = betterShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
+ nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
+ nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+ nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = tableEntry{
+ val: uint32(cv >> 24),
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Init or copy the dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Reset table to initial state
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.shortTableShardDirty {
+ if e.shortTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterShortTableShardCnt
+ const shardSize = betterShortTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.shortTableShardDirty {
+ e.shortTableShardDirty[i] = false
+ }
+ } else {
+ for i := range e.shortTableShardDirty {
+ if !e.shortTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.shortTableShardDirty[i] = false
+ }
+ }
+ }
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.longTableShardDirty {
+ if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterLongTableShardCnt
+ const shardSize = betterLongTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ } else {
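+ // Refresh only the long-table shards written since the last Reset.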
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+ e.longTableShardDirty[i] = false
+ }
+ }
+ }
+ e.cur = e.maxMatchOff
+ e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+ e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 000000000..7d425109a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1123 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ dFastLongTableBits = 17 // Bits used in the long match table
+ dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+ dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastLongLen = 8 // Bytes used for table hash
+
+ dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+ dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
+
+ dFastShortTableBits = tableBits // Bits used in the short match table
+ dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+ dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastShortLen = 5 // Bytes used for table hash
+
+)
+
+type doubleFastEncoder struct {
+ fastEncoder
+ longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+ fastEncoderDict
+ longTable [dFastLongTableSize]tableEntry
+ dictLongTable []tableEntry
+ longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics the functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [dFastShortTableSize]tableEntry{}
+ e.longTable = [dFastLongTableSize]tableEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
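+ // A larger step would scan faster at the cost of missed matches.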
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
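// The seq.offset = uint32(s-t) + 3 just above reflects zstd's offset
// coding, where codes 1..3 select recent (repeat) offsets and larger codes
// carry the absolute distance plus 3. A simplified sketch of the reverse
// mapping; decodeSeqOffset is an illustrative name, and the real decoder
// also special-cases litLen == 0.

package main

import "fmt"

func decodeSeqOffset(code uint32, recent [3]uint32) uint32 {
	if code <= 3 {
		return recent[code-1] // repeat offset ("rep 0" above is code 1)
	}
	return code - 3 // absolute match distance
}

func main() {
	recent := [3]uint32{111, 222, 333}
	fmt.Println(decodeSeqOffset(1, recent))      // 111: repeat offset
	fmt.Println(decodeSeqOffset(4000+3, recent)) // 4000: literal distance
}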
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
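// The repeat check above compares the 4 bytes at s+repOff against the 4
// bytes one repeat-offset back, reusing the 8 bytes already loaded at s by
// shifting them right. A toy, self-contained version using encoding/binary
// in place of the package's load3232/load6432 helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	src := []byte("abcXabcXabcX----")
	s, offset1, repOff := 4, 4, 1
	cv := binary.LittleEndian.Uint64(src[s:])                 // like load6432(src, s)
	rep := binary.LittleEndian.Uint32(src[s-offset1+repOff:]) // like load3232(src, repIndex)
	fmt.Println(rep == uint32(cv>>(8*repOff)))                // true: repeat match at s+1
}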
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
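// matchLen, called above, is defined elsewhere in the package; the
// EncodeNoHist variant uses it directly instead of the bounds-aware
// e.matchlen. A straightforward scalar equivalent that returns the length
// of the common prefix (the real implementation compares 8 bytes at a
// time); matchLenSketch is an illustrative name.

package main

import "fmt"

func matchLenSketch(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLenSketch([]byte("abcdefg"), []byte("abcdxfg"))) // 4
}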
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
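// The loop that follows rebases stored table offsets so that history
// restarts at maxMatchOff once e.cur approaches the int32 guard. Worked
// numbers for both branches, with an assumed window size for illustration:

package main

import "fmt"

func main() {
	const maxMatchOff = 1 << 16 // assumed window size
	cur, histLen := int32(2147000000), int32(4096)
	minOff := cur + histLen - maxMatchOff
	for _, v := range []int32{minOff - 1, minOff + 100} {
		if v < minOff {
			v = 0 // entry too old to ever match again
		} else {
			v = v - cur + maxMatchOff // rebased into the fresh window
		}
		fmt.Println(v) // 0, then 4196
	}
}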
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
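// The markShardDirty/markLongShardDirty calls above (and the 64K mark-all
// check just below) implement shard-level dirty tracking: the table is
// split into fixed-size shards, each write marks its shard, and Reset only
// re-copies dirty shards from the dictionary snapshot. A compact sketch
// with illustrative names and sizes:

package main

import "fmt"

const (
	tabSize   = 1 << 15
	shardSize = 256
	shardCnt  = tabSize / shardSize
)

type shardedTable struct {
	table [tabSize]uint32
	dirty [shardCnt]bool
}

func (t *shardedTable) store(idx, v uint32) {
	t.table[idx] = v
	t.dirty[idx/shardSize] = true // O(1) bookkeeping per write
}

func (t *shardedTable) resetFrom(snap *[tabSize]uint32) (restored int) {
	for i := range t.dirty {
		if !t.dirty[i] {
			continue // untouched shard: skip the copy
		}
		copy(t.table[i*shardSize:(i+1)*shardSize], snap[i*shardSize:(i+1)*shardSize])
		t.dirty[i] = false
		restored++
	}
	return restored
}

func main() {
	var t shardedTable
	t.store(12345, 42)
	fmt.Println("shards restored:", t.resetFrom(new([tabSize]uint32))) // 1 of 128
}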
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 000000000..315b1a8f2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,898 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ fastBase
+ table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ var length int32
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
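// The skip rule used in the search loop above,
// s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)),
// accelerates as the scan fails: with this file's stepSize = 2 and
// kSearchStrength = 6 the step grows by one for every 32 bytes scanned
// since the last emit. Worked values:

package main

import "fmt"

func main() {
	const stepSize, kSearchStrength = int32(2), 6
	for _, sinceEmit := range []int32{0, 31, 32, 64, 128} {
		step := stepSize + (sinceEmit >> (kSearchStrength - 1))
		fmt.Println("bytes since last emit:", sinceEmit, "step:", step) // 2, 2, 3, 4, 6
	}
}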
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 000000000..65c6c36dc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,676 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
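// Typical streaming use of the Encoder declared below, compressing stdin
// to stdout as a self-contained sketch. It only relies on API present in
// this patch (NewWriter, Write via io.Copy, Close).

package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		log.Fatal(err)
	}
	// Close flushes buffered data and appends the frame CRC if enabled.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}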
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
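// ResetContentSize, defined above, pre-declares the frame content size;
// Close later fails if the number of bytes written does not match. A
// minimal sketch of reusing one Encoder across outputs:

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	payload := []byte("exactly these bytes")
	var buf bytes.Buffer
	enc.ResetContentSize(&buf, int64(len(payload)))
	if _, err := enc.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err) // would fire if the size promise were broken
	}
}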
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. 
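// Below, one goroutine encodes the current block while a nested goroutine
// writes the previous result; the two WaitGroups keep at most one encode
// and one write in flight so output order is preserved. A stripped-down
// sketch of that hand-off, under the assumption that each stage must
// finish before its successor starts:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var encWg, writeWg sync.WaitGroup
	for n := 0; n < 3; n++ {
		encWg.Wait() // previous encode must be done
		encWg.Add(1)
		go func(n int) {
			defer encWg.Done()
			payload := fmt.Sprintf("block %d", n) // stand-in for Encode
			writeWg.Wait()                        // previous write must be done
			writeWg.Add(1)
			go func() {
				defer writeWg.Done()
				fmt.Println(payload) // stand-in for w.Write, always in order
			}()
		}(n)
	}
	encWg.Wait()
	writeWg.Wait()
}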
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. 
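// Flush, defined above, pushes buffered data out as a complete block
// without ending the frame, which matters for latency-sensitive streaming.
// Minimal sketch:

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("urgent partial data")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Flush(); err != nil { // block is now in buf, frame still open
		log.Fatal(err)
	}
	enc.Close()
	log.Println("bytes available:", buf.Len())
}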
+// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
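+		// Note: the raw-storage shortcut below only applies to full blockSize
+		// blocks where every byte ended up as a literal; a shorter final
+		// block always attempts entropy coding.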
+		err := errIncompressible
+		oldout := blk.output
+		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+			// Output directly to dst
+			blk.output = dst
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+
+		switch err {
+		case errIncompressible:
+			if debugEncoder {
+				println("Storing incompressible block as raw")
+			}
+			dst = blk.encodeRawTo(dst, src)
+		case nil:
+			dst = blk.output
+		default:
+			panic(err)
+		}
+		blk.output = oldout
+	} else {
+		enc.Reset(e.o.dict, false)
+		blk := enc.Block()
+		for len(src) > 0 {
+			todo := src
+			if len(todo) > e.o.blockSize {
+				todo = todo[:e.o.blockSize]
+			}
+			src = src[len(todo):]
+			if e.o.crc {
+				_, _ = enc.CRC().Write(todo)
+			}
+			blk.pushOffsets()
+			enc.Encode(blk, todo)
+			if len(src) == 0 {
+				blk.last = true
+			}
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+				err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+			}
+
+			switch err {
+			case errIncompressible:
+				if debugEncoder {
+					println("Storing incompressible block as raw")
+				}
+				dst = blk.encodeRawTo(dst, todo)
+				blk.popOffsets()
+			case nil:
+				dst = append(dst, blk.output...)
+			default:
+				panic(err)
+			}
+			blk.reset(nil)
+		}
+	}
+	if e.o.crc {
+		dst = enc.AppendCRC(dst)
+	}
+	// Add padding with content from crypto/rand.Reader
+	if e.o.pad > 0 {
+		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		dst, err = skippableFrame(dst, add, rand.Reader)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return dst
+}
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+	frameHeader := 4 + 2 // magic + frame header & window descriptor
+	if e.o.dict != nil {
+		frameHeader += 4
+	}
+	// Frame content size:
+	if size < 256 {
+		frameHeader++
+	} else if size < 65536+256 {
+		frameHeader += 2
+	} else if size < math.MaxInt32 {
+		frameHeader += 4
+	} else {
+		frameHeader += 8
+	}
+	// Final CRC
+	if e.o.crc {
+		frameHeader += 4
+	}
+
+	// Max overhead is 3 bytes/block.
+	// There cannot be 0 blocks.
+	blocks := (size + e.o.blockSize) / e.o.blockSize
+
+	// Combine, add padding.
+	maxSz := frameHeader + 3*blocks + size
+	if e.o.pad > 1 {
+		maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+	}
+	return maxSz
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 000000000..8e15be2f7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,339 @@
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"runtime"
+	"strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains the accumulated state of multiple options.
+type encoderOptions struct {
+	concurrent      int
+	level           EncoderLevel
+	single          *bool
+	pad             int
+	blockSize       int
+	windowSize      int
+	crc             bool
+	fullZero        bool
+	noEntropy       bool
+	allLitEntropy   bool
+	customWindow    bool
+	customALEntropy bool
+	customBlockSize bool
+	lowMem          bool
+	dict            *dict
+}
+
+func (o *encoderOptions) setDefault() {
+	*o = encoderOptions{
+		concurrent:    runtime.GOMAXPROCS(0),
+		crc:           true,
+		single:        nil,
+		blockSize:     maxCompressedBlockSize,
+		windowSize:    8 << 20,
+		level:         SpeedDefault,
+		allLitEntropy: true,
+		lowMem:        false,
+	}
+}
+
+// encoder returns an encoder with the selected options.
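+// Each speed level selects a different match-finder implementation (fast,
+// double fast, better, best); for most levels a dictionary-aware variant is
+// used when a dictionary has been registered.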
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. 
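+//
+// A minimal usage sketch from a client package (the 4 KiB multiple is
+// illustrative, not a default; w is a caller-supplied io.Writer):
+//
+//	enc, err := zstd.NewWriter(w, zstd.WithEncoderPadding(4096))
+//	if err != nil {
+//		return err
+//	}
+//	defer enc.Close() // total output will be a multiple of 4096 bytes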
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			n = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+		}
+		o.pad = n
+		return nil
+	}
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~2x-3x the default CPU usage.
+	// Be aware that CPU usage at this level may grow in future versions.
+	SpeedBetterCompression
+
+	// SpeedBestCompression will choose the best available compression option.
+	// This will offer the best compression no matter the CPU cost.
+	SpeedBestCompression
+
+	// speedLast should be kept as the last actual compression option.
+	// This is not for external usage, but is used to keep track of the valid options.
+	speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case-sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := speedNotSet + 1; l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return an encoder level that closest matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3 && level < 6:
+		return SpeedDefault
+	case level >= 6 && level < 10:
+		return SpeedBetterCompression
+	default:
+		return SpeedBestCompression
+	}
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	case SpeedBetterCompression:
+		return "better"
+	case SpeedBestCompression:
+		return "best"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+				if !o.customBlockSize {
+					o.blockSize = 1 << 16
+				}
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			case SpeedBestCompression:
+				o.windowSize = 32 << 20
+			}
+		}
+		if !o.customALEntropy {
+			o.allLitEntropy = l > SpeedFastest
+		}
+
+		return nil
+	}
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but in cases with no matches
+// but a skewed character distribution, compression is lost.
+// The default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.customALEntropy = true
+		o.allLitEntropy = b
+		return nil
+	}
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but is unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of your content.
+// To protect the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.single = &b
+		return nil
+	}
+}
+
+// WithLowerEncoderMem will, in some cases, trade lower memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary lever for reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.lowMem = b
+		return nil
+	}
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithEncoderDict(dict []byte) EOption {
+	return func(o *encoderOptions) error {
+		d, err := loadDict(dict)
+		if err != nil {
+			return err
+		}
+		o.dict = d
+		return nil
+	}
+}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
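+//
+// A hedged sketch of prefix-style usage (the id 0 and the prefix and payload
+// slices are caller-supplied, illustrative values):
+//
+//	enc, _ := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(0, prefix))
+//	compressed := enc.EncodeAll(payload, nil)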
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 000000000..d8e8a05bd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,432 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. 
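+			// Skippable frames carry magic 0x184D2A5? (the low nibble is free,
+			// hence the &0xf0 test above) followed by a 4-byte little-endian
+			// length of the payload to discard.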
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. 
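+		// A single-segment frame has no Window_Descriptor; the window is the
+		// frame content size itself, clamped below to MinWindowSize.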
+ d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if d.o.ignoreChecksum { + return nil + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + } + + return nil +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. 
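+	// Bytes before crcStart were supplied by the caller; they are excluded
+	// from the checksum and from the frame size accounting below.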
+ crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 000000000..4ef7f5a3e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) 
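+	// Frame_Header_Descriptor layout: bits 0-1 Dictionary_ID field size,
+	// bit 2 Content_Checksum flag, bit 5 Single_Segment flag, and
+	// bits 6-7 the Frame_Content_Size field size.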
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// becomes divisible by wantMultiple.
+// The return value is either 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 000000000..2f8860a72
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,307 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return s.buildDtable() +} + +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. 
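+// In RLE mode the table collapses to a single entry; actualTableLog is zero,
+// so no state bits are consumed and every decode yields the same symbol.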
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000..d04a829b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000..bcde39869 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000..332e51fe4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
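+// It works in three passes: low-probability (-1) symbols are parked at the
+// top of the table, the remaining symbols are spread with the table step, and
+// per-entry nBits/newState transitions are then derived from the counts.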
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 000000000..ab26326a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram allows populating the histogram so that step can be skipped during
+// compression. It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+func (s *fseEncoder) Histogram() *[256]uint32 {
+	return &s.count
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
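+//
+// For example, with tableLog == 8 the normalized counts must satisfy
+// sum(norm) == 1<<8 == 256, where each low-probability symbol marked -1
+// contributes a single slot to the sum.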
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) + } + for i, v := range s.count[s.symbolLen:] { + if v != 0 { + return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) + } + } + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { + if s.useRLE { + return append(out, s.rleVal), nil + } + if s.preDefined || s.reUsed { + // Never write predefined. + return out, nil + } + + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + // maximum header size plus 2 extra bytes for final output if bitCount == 0. + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns math.MaxUint32 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not the exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream.
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 000000000..474cb77d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
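As a quick illustration of how these baseOffset entries are consumed (an editorial sketch, not part of the vendored file): a decoder takes a code's baseLine and adds addBits extra bits read from the stream. Literal-length code 16, as built by the fillBase call that follows, has baseline 16 with one extra bit, so it decodes to length 16 or 17:

```
package main

import "fmt"

type baseOffset struct {
	baseLine uint32
	addBits  uint8
}

// decodeValue adds the extra bits read from the stream to the baseline.
// extra must fit in bo.addBits bits.
func decodeValue(bo baseOffset, extra uint32) uint32 {
	return bo.baseLine + extra
}

func main() {
	llCode16 := baseOffset{baseLine: 16, addBits: 1} // literal-length code 16
	fmt.Println(decodeValue(llCode16, 0), decodeValue(llCode16, 1)) // 16 17
}
```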
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 000000000..5d73c21eb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 000000000..09164856d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 000000000..777290d44 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
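For readers unfamiliar with the API summarized above, here is a minimal usage sketch against the upstream github.com/cespare/xxhash/v2 module (editorial, not part of the vendored README):

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hash of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming: Digest implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64()) // same digest as the one-shot call
}
```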
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 000000000..fc40c8200 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 000000000..ddb63aa91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..17901e080 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 000000000..d4221edf4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 000000000..0be16cefc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 000000000..6f3b0cb10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 000000000..27fdf90fb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,505 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog]) + return nil +} + +// sequenceDecs contains all 3 sequence decoders and their state. +type sequenceDecs struct { + litLengths sequenceDec + offsets sequenceDec + matchLengths sequenceDec + prevOffset [3]int + dict []byte + literals []byte + out []byte + nSeqs int + br *bitReader + seqSize int + windowSize int + maxBits uint8 + maxSyncLen uint64 +} + +// initialize all 3 decoders from the stream input. +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { + if err := s.litLengths.init(br); err != nil { + return errors.New("litLengths:" + err.Error()) + } + if err := s.offsets.init(br); err != nil { + return errors.New("offsets:" + err.Error()) + } + if err := s.matchLengths.init(br); err != nil { + return errors.New("matchLengths:" + err.Error()) + } + s.br = br + s.prevOffset = hist.recentOffsets + s.dict = nil + s.literals = hist.decoders.literals + s.out = out + s.windowSize = hist.windowSize + s.maxBits = hist.decoders.maxBits + s.maxSyncLen = hist.decoders.maxSyncLen + if hist.dict != nil { + s.dict = hist.dict.content + } + return nil +} + +func (s *sequenceDecs) freeDecoders() { + if f := s.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.litLengths.fse = nil + } + if f := s.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.offsets.fse = nil + } + if f := s.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.matchLengths.fse = nil + } +} + +// execute will execute the decoded sequences and write them to the output. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..387a30e99 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,378 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
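To make the repeat-offset bookkeeping above easier to follow, here is a standalone mirror of adjustOffset with the offset history passed explicitly (an editorial sketch, not part of the patch):

```
package main

import "fmt"

// adjust mirrors sequenceDecs.adjustOffset above. offsetB > 1 signals a
// literal ("new") offset; otherwise the decoded value selects one of the
// three recent offsets, shifted by one when the sequence has no literals.
func adjust(prev *[3]int, offset, litLen int, offsetB uint8) int {
	if offsetB > 1 {
		prev[2], prev[1], prev[0] = prev[1], prev[0], offset
		return offset
	}
	if litLen == 0 {
		offset++
	}
	if offset == 0 {
		return prev[0] // most recent offset, history unchanged
	}
	var temp int
	if offset == 3 {
		temp = prev[0] - 1 // Repeated_Offset1 minus one
	} else {
		temp = prev[offset]
	}
	if temp == 0 {
		temp = 1 // 0 is invalid; corrupt input is forced to 1
	}
	if offset != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	prev := [3]int{8, 4, 1}
	// Selecting the third recent offset moves it to the front of the history.
	fmt.Println(adjust(&prev, 2, 5, 0), prev) // 1 [1 8 4]
}
```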
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exceeds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	ctx := executeAsmContext{
+		seqs:        seqs,
+		seqIndex:    0,
+		out:         out,
+		history:     hist,
+		outPosition: t,
+		litPosition: 0,
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+	}
+	var ok bool
+	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+		ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
+	} else {
+		ok = sequenceDecs_executeSimple_amd64(&ctx)
+	}
+	if !ok {
+		return fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
+	}
+	s.literals = s.literals[ctx.litPosition:]
+	t = ctx.outPosition
+
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
new file mode 100644
index 000000000..b94993a07
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -0,0 +1,4079 @@
+// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc
+
+// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_amd64(SB), $8-32
+	MOVQ    br+8(FP), AX
+	MOVQ    32(AX), DX
+	MOVBQZX 40(AX), BX
+	MOVQ    24(AX), SI
+	MOVQ    (AX), AX
+	ADDQ    SI, AX
+	MOVQ    AX, (SP)
+	MOVQ    ctx+16(FP), AX
+	MOVQ    72(AX), DI
+	MOVQ    80(AX), R8
+	MOVQ    88(AX), R9
+	MOVQ    104(AX), R10
+	MOVQ    s+0(FP), AX
+	MOVQ    144(AX), R11
+	MOVQ    152(AX), R12
+	MOVQ    160(AX), R13
+
+sequenceDecs_decode_amd64_main_loop:
+	MOVQ (SP), R14
+
+	// Fill bitreader to have enough for the offset and match length.
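+	// An 8-byte load refills the bit buffer in one step when possible; otherwise
+	// the byte-by-byte path below drains the remaining input.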
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
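+	// Repeat offsets: small offset values select one of the three most recently
+	// used offsets instead of encoding a new one, and a literal length of zero
+	// shifts that selection by one (RFC 8878).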
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
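+	// A resolved repeat offset of zero indicates corrupted input; it is forced
+	// to 1, matching the Go reference implementation.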
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ 
R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + 
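+	// The safe variant dispatches short copies by exact size, so it never reads
+	// or writes past the end of a buffer, unlike the 16-byte vector fast path.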
+copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB 
(R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
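+ // The copy_1_move_* branches above handle short literal runs with a pair of
+ // overlapping loads and stores (covering the first and last bytes of the
+ // run), avoiding a byte-at-a-time loop for copies of up to 16 bytes.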
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..ac2a80d29 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
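+ // For example, with s.prevOffset = [8, 4, 2] (most recent first),
+ // starting fresh from that state on each line:
+ //   ll > 0, mo == 1: offset 4, prevOffset becomes [4, 8, 2]
+ //   ll == 0, mo == 1: shifted to 2, offset 2, prevOffset becomes [2, 8, 4]
+ //   ll == 0, mo == 2: shifted to 3, offset 8-1 = 7, prevOffset becomes [7, 8, 4]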
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
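+ // (The first v bytes of the match come from the end of hist; the
+ // remaining seq.ml-v bytes are copied from the current block further
+ // below.)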
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 000000000..8014174a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
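+// For example, lengths 0-15 map directly to codes 0-15, length 32 maps to
+// code 22 via the table above, and length 100 maps to highBit(100)+19 = 25,
+// which per llBitsTable carries 6 extra bits.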
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 000000000..9e1baad73 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,435 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
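+ // Per the framing format, the 4-byte header read above holds a 1-byte
+ // chunk type and a 3-byte little-endian length; the body of a
+ // compressed-data chunk is a 4-byte masked CRC-32C of the uncompressed
+ // data followed by the Snappy-compressed payload.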
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
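+ // The framing format requires decoders to report an error on reserved
+ // unskippable chunks instead of silently skipping them.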
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..29c15c8c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
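+ // NewReader is expected to fail only for invalid options here, so the
+ // failure is escalated via panic below rather than returned.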
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 000000000..5ffa82f5a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,143 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. 
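+// It compares eight bytes at a time; when two words differ, the index of the
+// first mismatching byte is bits.TrailingZeros64(xor)>>3 thanks to
+// little-endian byte order.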
+func matchLen(a, b []byte) (n int) {
+	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
+
+func load3232(b []byte, i int32) uint32 {
+	return binary.LittleEndian.Uint32(b[i:])
+}
+
+func load6432(b []byte, i int32) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+type byter interface {
+	Bytes() []byte
+	Len() int
+}
+
+var _ byter = &bytes.Buffer{}
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index ea7df3dd8..857a93e59 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -16,10 +16,17 @@ Package home: https://github.com/klauspost/cpuid
 
 ## installing
 
-`go get -u github.com/klauspost/cpuid/v2` using modules.
-
+`go get -u github.com/klauspost/cpuid/v2` using modules. Drop `v2` for others.
+### Homebrew
+
+For macOS/Linux users, you can install via [brew](https://brew.sh/):
+
+```sh
+$ brew install cpuid
+```
+
 ## example
 
 ```Go
@@ -77,10 +84,14 @@ We have Streaming SIMD 2 Extensions
 The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
 A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
 
+To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc.
+This can be used with `cpuid.CPU.HasAll(f)` to quickly test whether all features are supported.
+
 Note that for some cpu/os combinations some features will not be detected.
 `amd64` has rather good support and should work reliably on all platforms.
 
-Note that hypervisors may not pass through all CPU features.
+Note that hypervisors may not pass all CPU features through to the guest OS,
+so even if your host supports a feature it may not be visible on guests.
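+For illustration, a minimal sketch of that combined check (the feature set
+here is only an example; substitute the features your code path requires):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	// Combine the features a fast code path needs, then test them in one call.
+	required := cpuid.CombineFeatures(cpuid.CMOV, cpuid.SSE, cpuid.SSE2)
+	if cpuid.CPU.HasAll(required) {
+		fmt.Println("fast path supported")
+	} else {
+		fmt.Println("using generic fallback")
+	}
+}
+```
+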
## arm64 feature detection @@ -253,6 +264,218 @@ Exit Code 0 Exit Code 1 ``` + +## Available flags + +### x86 & amd64 + +| Feature Flag | Description | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) | +| AESNI | Advanced Encryption Standard New Instructions | +| AMD3DNOW | AMD 3DNOW | +| AMD3DNOWEXT | AMD 3DNowExt | +| AMXBF16 | Tile computational operations on BFLOAT16 numbers | +| AMXINT8 | Tile computational operations on 8-bit integers | +| AMXFP16 | Tile computational operations on FP16 numbers | +| AMXTILE | Tile architecture | +| AVX | AVX functions | +| AVX2 | AVX2 functions | +| AVX512BF16 | AVX-512 BFLOAT16 Instructions | +| AVX512BITALG | AVX-512 Bit Algorithms | +| AVX512BW | AVX-512 Byte and Word Instructions | +| AVX512CD | AVX-512 Conflict Detection Instructions | +| AVX512DQ | AVX-512 Doubleword and Quadword Instructions | +| AVX512ER | AVX-512 Exponential and Reciprocal Instructions | +| AVX512F | AVX-512 Foundation | +| AVX512FP16 | AVX-512 FP16 Instructions | +| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions | +| AVX512PF | AVX-512 Prefetch Instructions | +| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions | +| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 | +| AVX512VL | AVX-512 Vector Length Extensions | +| AVX512VNNI | AVX-512 Vector Neural Network Instructions | +| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q | +| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword | +| AVXIFMA | AVX-IFMA instructions | +| AVXNECONVERT | AVX-NE-CONVERT instructions | +| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | +| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | +| AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| BMI1 | Bit Manipulation Instruction Set 1 | +| BMI2 | Bit Manipulation Instruction Set 2 | +| CETIBT | Intel CET Indirect Branch Tracking | +| CETSS | Intel CET Shadow Stack | +| CLDEMOTE | Cache Line Demote | +| CLMUL | Carry-less Multiplication | +| CLZERO | CLZERO instruction supported | +| CMOV | i686 CMOV | +| CMPCCXADD | CMPCCXADD instructions | +| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB | +| CMPXCHG8 | CMPXCHG8 instruction | +| CPBOOST | Core Performance Boost | +| CPPC | AMD: Collaborative Processor Performance Control | +| CX16 | CMPXCHG16B Instruction | +| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ | +| ENQCMD | Enqueue Command | +| ERMS | Enhanced REP MOVSB/STOSB | +| F16C | Half-precision floating-point conversion | +| FLUSH_L1D | Flush L1D cache | +| FMA3 | Intel FMA 3. Does not imply AVX. | +| FMA4 | Bulldozer FMA4 functions | +| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide | +| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide | +| FSRM | Fast Short Rep Mov | +| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 | +| FXSROPT | FXSAVE/FXRSTOR optimizations | +| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. | +| HLE | Hardware Lock Elision | +| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR | +| HTT | Hyperthreading (enabled) | +| HWA | Hardware assert supported. 
Indicates support for MSRC001_10 | +| HYBRID_CPU | This part has CPUs of more than one type. | +| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors | +| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) | +| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR | +| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) | +| IBRS | AMD: Indirect Branch Restricted Speculation | +| IBRS_PREFERRED | AMD: IBRS is preferred over software solution | +| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection | +| IBS | Instruction Based Sampling (AMD) | +| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) | +| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) | +| IBSFFV | Instruction Based Sampling Feature (AMD) | +| IBSOPCNT | Instruction Based Sampling Feature (AMD) | +| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) | +| IBSOPSAM | Instruction Based Sampling Feature (AMD) | +| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) | +| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) | +| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported | +| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported | +| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | +| IBS_PREVENTHOST | Disallowing IBS use by the host supported | +| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| INT_WBINVD | WBINVD/WBNOINVD are interruptible. | +| INVLPGB | NVLPGB and TLBSYNC instruction supported | +| LAHF | LAHF/SAHF in long mode | +| LAM | If set, CPU supports Linear Address Masking | +| LBRVIRT | LBR virtualization | +| LZCNT | LZCNT instruction | +| MCAOVERFLOW | MCA overflow recovery support. | +| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. | +| MCOMMIT | MCOMMIT instruction supported | +| MD_CLEAR | VERW clears CPU buffers | +| MMX | standard MMX | +| MMXEXT | SSE integer functions or AMD MMX ext | +| MOVBE | MOVBE instruction (big-endian) | +| MOVDIR64B | Move 64 Bytes as Direct Store | +| MOVDIRI | Move Doubleword as Direct Store | +| MOVSB_ZL | Fast Zero-Length MOVSB | +| MPX | Intel MPX (Memory Protection Extensions) | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | +| MSRIRC | Instruction Retired Counter MSR available | +| MSR_PAGEFLUSH | Page Flush MSR available | +| NRIPS | Indicates support for NRIP save on VMEXIT | +| NX | NX (No-Execute) bit | +| OSXSAVE | XSAVE enabled by OS | +| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | +| POPCNT | POPCNT instruction | +| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | AMD: Predictive Store Forward Disable | +| RDPRU | RDPRU instruction supported | +| RDRAND | RDRAND instruction is available | +| RDSEED | RDSEED instruction is available | +| RDTSCP | RDTSCP Instruction | +| RTM | Restricted Transactional Memory | +| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. 
| +| SERIALIZE | Serialize Instruction Execution | +| SEV | AMD Secure Encrypted Virtualization supported | +| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host | +| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported | +| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests | +| SEV_ES | AMD SEV Encrypted State supported | +| SEV_RESTRICTED | AMD SEV Restricted Injection supported | +| SEV_SNP | AMD SEV Secure Nested Paging supported | +| SGX | Software Guard Extensions | +| SGXLC | Software Guard Extensions Launch Control | +| SHA | Intel SHA Extensions | +| SME | AMD Secure Memory Encryption supported | +| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced | +| SPEC_CTRL_SSBD | Speculative Store Bypass Disable | +| SRBDS_CTRL | SRBDS mitigation MSR available | +| SSE | SSE functions | +| SSE2 | P4 SSE functions | +| SSE3 | Prescott SSE3 functions | +| SSE4 | Penryn SSE4.1 functions | +| SSE42 | Nehalem SSE4.2 functions | +| SSE4A | AMD Barcelona microarchitecture SSE4a instructions | +| SSSE3 | Conroe SSSE3 functions | +| STIBP | Single Thread Indirect Branch Predictors | +| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On | +| STOSB_SHORT | Fast short STOSB | +| SUCCOR | Software uncorrectable error containment and recovery capability. | +| SVM | AMD Secure Virtual Machine | +| SVMDA | Indicates support for the SVM decode assists. | +| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control | +| SVML | AMD SVM lock. Indicates support for SVM-Lock. | +| SVMNP | AMD SVM nested paging | +| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter | +| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold | +| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. | +| SYSEE | SYSENTER and SYSEXIT instructions | +| TBM | AMD Trailing Bit Manipulation | +| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations | +| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. | +| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. | +| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 | +| TSXLDTRK | Intel TSX Suspend Load Address Tracking | +| VAES | Vector AES. AVX(512) versions requires additional checks. | +| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. | +| VMPL | AMD VM Permission Levels supported | +| VMSA_REGPROT | AMD VMSA Register Protection supported | +| VMX | Virtual Machine Extensions | +| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. | +| VTE | AMD Virtual Transparent Encryption supported | +| WAITPKG | TPAUSE, UMONITOR, UMWAIT | +| WBNOINVD | Write Back and Do Not Invalidate Cache | +| X87 | FPU | +| XGETBV1 | Supports XGETBV with ECX = 1 | +| XOP | Bulldozer XOP functions | +| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV | +| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. 
| +| XSAVEOPT | XSAVEOPT available | +| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS | + +# ARM features: + +| Feature Flag | Description | +|--------------|------------------------------------------------------------------| +| AESARM | AES instructions | +| ARMCPUID | Some CPU ID registers readable at user-level | +| ASIMD | Advanced SIMD | +| ASIMDDP | SIMD Dot Product | +| ASIMDHP | Advanced SIMD half-precision floating point | +| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) | +| ATOMICS | Large System Extensions (LSE) | +| CRC32 | CRC32/CRC32C instructions | +| DCPOP | Data cache clean to Point of Persistence (DC CVAP) | +| EVTSTRM | Generic timer | +| FCMA | Floatin point complex number addition and multiplication | +| FP | Single-precision and double-precision floating point | +| FPHP | Half-precision floating point | +| GPA | Generic Pointer Authentication | +| JSCVT | Javascript-style double->int convert (FJCVTZS) | +| LRCPC | Weaker release consistency (LDAPR, etc) | +| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) | +| SHA1 | SHA-1 instructions (SHA1C, etc) | +| SHA2 | SHA-2 instructions (SHA256H, etc) | +| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) | +| SHA512 | SHA512 instructions | +| SM3 | SM3 instructions | +| SM4 | SM4 instructions | +| SVE | Scalable Vector Extension | + # license This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 701f2385b..cf2ae9c51 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -73,6 +73,7 @@ const ( AMD3DNOW // AMD 3DNOW AMD3DNOWEXT // AMD 3DNowExt AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers AMXTILE // Tile architecture AVX // AVX functions @@ -93,8 +94,11 @@ const ( AVX512VNNI // AVX-512 Vector Neural Network Instructions AVX512VP2INTERSECT // AVX-512 Intersect for D/Q AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 CETIBT // Intel CET Indirect Branch Tracking @@ -103,15 +107,22 @@ const ( CLMUL // Carry-less Multiplication CLZERO // CLZERO instruction supported CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB CMPXCHG8 // CMPXCHG8 instruction CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor Performance Control CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ ENQCMD // Enqueue Command ERMS // Enhanced REP MOVSB/STOSB F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache FMA3 // Intel FMA 3. Does not imply AVX. 
FMA4 // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 FXSROPT // FXSAVE/FXRSTOR optimizations GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. @@ -119,8 +130,14 @@ const ( HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR HTT // Hyperthreading (enabled) HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection IBS // Instruction Based Sampling (AMD) IBSBRNTRGT // Instruction Based Sampling Feature (AMD) IBSFETCHSAM // Instruction Based Sampling Feature (AMD) @@ -130,7 +147,11 @@ const ( IBSOPSAM // Instruction Based Sampling Feature (AMD) IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 INT_WBINVD // WBINVD/WBNOINVD are interruptible. INVLPGB // NVLPGB and TLBSYNC instruction supported LAHF // LAHF/SAHF in long mode @@ -138,13 +159,16 @@ const ( LBRVIRT // LBR virtualization LZCNT // LZCNT instruction MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers MMX // standard MMX MMXEXT // SSE integer functions or AMD MMX ext MOVBE // MOVBE instruction (big-endian) MOVDIR64B // Move 64 Bytes as Direct Store MOVDIRI // Move Doubleword as Direct Store MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD MPX // Intel MPX (Memory Protection Extensions) MSRIRC // Instruction Retired Counter MSR available MSR_PAGEFLUSH // Page Flush MSR available @@ -153,13 +177,15 @@ const ( OSXSAVE // XSAVE enabled by OS PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // AMD: Predictive Store Forward Disable RDPRU // RDPRU instruction supported RDRAND // RDRAND instruction is available RDSEED // RDSEED instruction is available RDTSCP // RDTSCP Instruction RTM // Restricted Transactional Memory RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. 
- SCE // SYSENTER and SYSEXIT instructions SERIALIZE // Serialize Instruction Execution SEV // AMD Secure Encrypted Virtualization supported SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host @@ -173,6 +199,8 @@ const ( SHA // Intel SHA Extensions SME // AMD Secure Memory Encryption supported SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available SSE // SSE functions SSE2 // P4 SSE functions SSE3 // Prescott SSE3 functions @@ -181,6 +209,7 @@ const ( SSE4A // AMD Barcelona microarchitecture SSE4a instructions SSSE3 // Conroe SSSE3 functions STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On STOSB_SHORT // Fast short STOSB SUCCOR // Software uncorrectable error containment and recovery capability. SVM // AMD Secure Virtual Machine @@ -190,8 +219,12 @@ const ( SVMNP // AMD SVM nested paging SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. + SYSEE // SYSENTER and SYSEXIT instructions TBM // AMD Trailing Bit Manipulation + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 TSXLDTRK // Intel TSX Suspend Load Address Tracking VAES // Vector AES. AVX(512) versions requires additional checks. @@ -253,6 +286,7 @@ type CPUInfo struct { LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. Family int // CPU family number Model int // CPU model number + Stepping int // CPU stepping info CacheLine int // Cache line size in bytes. Will be 0 if undetectable. Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. BoostFreq int64 // Max clock speed, if known, 0 otherwise @@ -355,30 +389,61 @@ func (c CPUInfo) Supports(ids ...FeatureID) bool { // Has allows for checking a single feature. // Should be inlined by the compiler. -func (c CPUInfo) Has(id FeatureID) bool { +func (c *CPUInfo) Has(id FeatureID) bool { return c.featureSet.inSet(id) } +// AnyOf returns whether the CPU supports one or more of the requested features. +func (c CPUInfo) AnyOf(ids ...FeatureID) bool { + for _, id := range ids { + if c.featureSet.inSet(id) { + return true + } + } + return false +} + +// Features contains several features combined for a fast check using +// CpuInfo.HasAll +type Features *flagSet + +// CombineFeatures allows to combine several features for a close to constant time lookup. 
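+// Build the set once and reuse it, for example:
+//
+//	v := CombineFeatures(SSE, SSE2, SSSE3)
+//	allPresent := CPU.HasAll(v)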
+func CombineFeatures(ids ...FeatureID) Features {
+	var v flagSet
+	for _, id := range ids {
+		v.set(id)
+	}
+	return &v
+}
+
+func (c *CPUInfo) HasAll(f Features) bool {
+	return c.featureSet.hasSetP(f)
+}
+
 // https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
-var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
-var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
-var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
-var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
+var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
+var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)

 // X64Level returns the microarchitecture level detected on the CPU.
 // If features are lacking or non x64 mode, 0 is returned.
 // See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
 func (c CPUInfo) X64Level() int {
-	if c.featureSet.hasSet(level4Features) {
+	if !c.featureSet.hasOneOf(oneOfLevel) {
+		return 0
+	}
+	if c.featureSet.hasSetP(level4Features) {
 		return 4
 	}
-	if c.featureSet.hasSet(level3Features) {
+	if c.featureSet.hasSetP(level3Features) {
 		return 3
 	}
-	if c.featureSet.hasSet(level2Features) {
+	if c.featureSet.hasSetP(level2Features) {
 		return 2
 	}
-	if c.featureSet.hasSet(level1Features) {
+	if c.featureSet.hasSetP(level1Features) {
 		return 1
 	}
 	return 0
@@ -542,7 +607,7 @@ const flagMask = flagBits - 1
 // flagSet contains detected cpu features and characteristics in an array of flags
 type flagSet [(lastID + flagMask) / flagBits]flags

-func (s flagSet) inSet(feat FeatureID) bool {
+func (s *flagSet) inSet(feat FeatureID) bool {
 	return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
 }

@@ -572,7 +637,7 @@ func (s *flagSet) or(other flagSet) {
 }

 // hasSet returns whether all features are present.
-func (s flagSet) hasSet(other flagSet) bool {
+func (s *flagSet) hasSet(other flagSet) bool {
 	for i, v := range other[:] {
 		if s[i]&v != v {
 			return false
@@ -581,8 +646,28 @@ func (s flagSet) hasSet(other flagSet) bool {
 	return true
 }

+// hasSetP returns whether all features are present.
+func (s *flagSet) hasSetP(other *flagSet) bool {
+	for i, v := range other[:] {
+		if s[i]&v != v {
+			return false
+		}
+	}
+	return true
+}
+
+// hasOneOf returns whether one or more features are present.
+func (s *flagSet) hasOneOf(other *flagSet) bool {
+	for i, v := range other[:] {
+		if s[i]&v != 0 {
+			return true
+		}
+	}
+	return false
+}
+
 // nEnabled will return the number of enabled flags.
-func (s flagSet) nEnabled() (n int) { +func (s *flagSet) nEnabled() (n int) { for _, v := range s[:] { n += bits.OnesCount64(uint64(v)) } @@ -677,7 +762,7 @@ func threadsPerCore() int { if vend == AMD { // Workaround for AMD returning 0, assume 2 if >= Zen 2 // It will be more correct than not. - fam, _ := familyModel() + fam, _, _ := familyModel() _, _, _, d := cpuid(1) if (d&(1<<28)) != 0 && fam >= 23 { return 2 @@ -715,14 +800,27 @@ func logicalCores() int { } } -func familyModel() (int, int) { +func familyModel() (family, model, stepping int) { if maxFunctionID() < 0x1 { - return 0, 0 + return 0, 0, 0 } eax, _, _, _ := cpuid(1) - family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) - model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) - return int(family), int(model) + // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0]. + family = int((eax >> 8) & 0xf) + extFam := family == 0x6 // Intel is 0x6, needs extended model. + if family == 0xf { + // Add ExtFamily + family += int((eax >> 20) & 0xff) + extFam = true + } + // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0]. + model = int((eax >> 4) & 0xf) + if extFam { + // Add ExtModel + model += int((eax >> 12) & 0xf0) + } + stepping = int(eax & 0xf) + return family, model, stepping } func physicalCores() int { @@ -857,7 +955,7 @@ func (c *CPUInfo) cacheSize() { c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties - if maxExtendedFunction() < 0x8000001D { + if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) { return } @@ -974,14 +1072,13 @@ func support() flagSet { if mfi < 0x1 { return fs } - family, model := familyModel() + family, model, _ := familyModel() _, _, c, d := cpuid(1) fs.setIf((d&(1<<0)) != 0, X87) fs.setIf((d&(1<<8)) != 0, CMPXCHG8) - fs.setIf((d&(1<<11)) != 0, SCE) + fs.setIf((d&(1<<11)) != 0, SYSEE) fs.setIf((d&(1<<15)) != 0, CMOV) - fs.setIf((d&(1<<22)) != 0, MMXEXT) fs.setIf((d&(1<<23)) != 0, MMX) fs.setIf((d&(1<<24)) != 0, FXSR) fs.setIf((d&(1<<25)) != 0, FXSROPT) @@ -989,9 +1086,9 @@ func support() flagSet { fs.setIf((d&(1<<26)) != 0, SSE2) fs.setIf((c&1) != 0, SSE3) fs.setIf((c&(1<<5)) != 0, VMX) - fs.setIf((c&0x00000200) != 0, SSSE3) - fs.setIf((c&0x00080000) != 0, SSE4) - fs.setIf((c&0x00100000) != 0, SSE42) + fs.setIf((c&(1<<9)) != 0, SSSE3) + fs.setIf((c&(1<<19)) != 0, SSE4) + fs.setIf((c&(1<<20)) != 0, SSE42) fs.setIf((c&(1<<25)) != 0, AESNI) fs.setIf((c&(1<<1)) != 0, CLMUL) fs.setIf(c&(1<<22) != 0, MOVBE) @@ -1068,21 +1165,36 @@ func support() flagSet { fs.setIf(ecx&(1<<30) != 0, SGXLC) // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<4) != 0, FSRM) + fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL) + fs.setIf(edx&(1<<10) != 0, MD_CLEAR) fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) fs.setIf(edx&(1<<14) != 0, SERIALIZE) + fs.setIf(edx&(1<<15) != 0, HYBRID_CPU) fs.setIf(edx&(1<<16) != 0, TSXLDTRK) fs.setIf(edx&(1<<18) != 0, PCONFIG) fs.setIf(edx&(1<<20) != 0, CETIBT) fs.setIf(edx&(1<<26) != 0, IBPB) fs.setIf(edx&(1<<27) != 0, STIBP) + fs.setIf(edx&(1<<28) != 0, FLUSH_L1D) + fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP) + fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP) + fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD) + + // CPUID.(EAX=7, ECX=1).EDX + fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8) + fs.setIf(edx&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx&(1<<14) != 0, PREFETCHI) - // CPUID.(EAX=7, ECX=1) + // CPUID.(EAX=7, ECX=1).EAX eax1, _, _, _ := cpuidex(7, 1) fs.setIf(fs.inSet(AVX) 
&& eax1&(1<<4) != 0, AVXVNNI) + fs.setIf(eax1&(1<<7) != 0, CMPCCXADD) fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT) fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT) fs.setIf(eax1&(1<<22) != 0, HRESET) + fs.setIf(eax1&(1<<23) != 0, AVXIFMA) fs.setIf(eax1&(1<<26) != 0, LAM) // Only detect AVX-512 features if XGETBV is supported @@ -1120,9 +1232,15 @@ func support() flagSet { fs.setIf(edx&(1<<25) != 0, AMXINT8) // eax1 = CPUID.(EAX=7, ECX=1).EAX fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<21) != 0, AMXFP16) } } + + // CPUID.(EAX=7, ECX=2) + _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<5) != 0, MCDT_NO) } + // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) // EAX // Bit 00: XSAVEOPT is available. @@ -1156,20 +1274,24 @@ func support() flagSet { fs.setIf((c&(1<<2)) != 0, SVM) fs.setIf((c&(1<<6)) != 0, SSE4A) fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((c&(1<<22)) != 0, TOPEXT) // EDX - fs.setIf((d&(1<<31)) != 0, AMD3DNOW) - fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT) - fs.setIf((d&(1<<23)) != 0, MMX) - fs.setIf((d&(1<<22)) != 0, MMXEXT) + fs.setIf(d&(1<<11) != 0, SYSCALL) fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<22) != 0, MMXEXT) + fs.setIf(d&(1<<23) != 0, MMX) + fs.setIf(d&(1<<24) != 0, FXSR) + fs.setIf(d&(1<<25) != 0, FXSROPT) fs.setIf(d&(1<<27) != 0, RDTSCP) + fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT) + fs.setIf(d&(1<<31) != 0, AMD3DNOW) /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be * used unless the OS has AVX support. */ if fs.inSet(AVX) { - fs.setIf((c&0x00000800) != 0, XOP) - fs.setIf((c&0x00010000) != 0, FMA4) + fs.setIf((c&(1<<11)) != 0, XOP) + fs.setIf((c&(1<<16)) != 0, FMA4) } } @@ -1183,9 +1305,21 @@ func support() flagSet { if maxExtendedFunction() >= 0x80000008 { _, b, _, _ := cpuid(0x80000008) + fs.setIf(b&(1<<28) != 0, PSFD) + fs.setIf(b&(1<<27) != 0, CPPC) + fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD) + fs.setIf(b&(1<<23) != 0, PPIN) + fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED) + fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS) + fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP) + fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED) + fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON) + fs.setIf(b&(1<<15) != 0, STIBP) + fs.setIf(b&(1<<14) != 0, IBRS) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf(b&(1<<12) != 0, IBPB) fs.setIf((b&(1<<9)) != 0, WBNOINVD) fs.setIf((b&(1<<8)) != 0, MCOMMIT) - fs.setIf((b&(1<<13)) != 0, INT_WBINVD) fs.setIf((b&(1<<4)) != 0, RDPRU) fs.setIf((b&(1<<3)) != 0, INVLPGB) fs.setIf((b&(1<<1)) != 0, MSRIRC) @@ -1206,6 +1340,13 @@ func support() flagSet { fs.setIf((edx>>12)&1 == 1, SVMPFT) } + if maxExtendedFunction() >= 0x8000001a { + eax, _, _, _ := cpuid(0x8000001a) + fs.setIf((eax>>0)&1 == 1, FP128) + fs.setIf((eax>>1)&1 == 1, MOVU) + fs.setIf((eax>>2)&1 == 1, FP256) + } + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { eax, _, _, _ := cpuid(0x8000001b) fs.setIf((eax>>0)&1 == 1, IBSFFV) @@ -1216,6 +1357,10 @@ func support() flagSet { fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE) + fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX) + fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1. 
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4) } if maxExtendedFunction() >= 0x8000001f && vend == AMD { diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index 35678d8a3..c946824ec 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -24,7 +24,7 @@ func addInfo(c *CPUInfo, safe bool) { c.maxExFunc = maxExtendedFunction() c.BrandName = brandName() c.CacheLine = cacheLine() - c.Family, c.Model = familyModel() + c.Family, c.Model, c.Stepping = familyModel() c.featureSet = support() c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) c.ThreadsPerCore = threadsPerCore() diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index a9b3e36c7..8b6cd2b72 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -13,174 +13,207 @@ func _() { _ = x[AMD3DNOW-3] _ = x[AMD3DNOWEXT-4] _ = x[AMXBF16-5] - _ = x[AMXINT8-6] - _ = x[AMXTILE-7] - _ = x[AVX-8] - _ = x[AVX2-9] - _ = x[AVX512BF16-10] - _ = x[AVX512BITALG-11] - _ = x[AVX512BW-12] - _ = x[AVX512CD-13] - _ = x[AVX512DQ-14] - _ = x[AVX512ER-15] - _ = x[AVX512F-16] - _ = x[AVX512FP16-17] - _ = x[AVX512IFMA-18] - _ = x[AVX512PF-19] - _ = x[AVX512VBMI-20] - _ = x[AVX512VBMI2-21] - _ = x[AVX512VL-22] - _ = x[AVX512VNNI-23] - _ = x[AVX512VP2INTERSECT-24] - _ = x[AVX512VPOPCNTDQ-25] - _ = x[AVXSLOW-26] - _ = x[AVXVNNI-27] - _ = x[BMI1-28] - _ = x[BMI2-29] - _ = x[CETIBT-30] - _ = x[CETSS-31] - _ = x[CLDEMOTE-32] - _ = x[CLMUL-33] - _ = x[CLZERO-34] - _ = x[CMOV-35] - _ = x[CMPSB_SCADBS_SHORT-36] - _ = x[CMPXCHG8-37] - _ = x[CPBOOST-38] - _ = x[CX16-39] - _ = x[ENQCMD-40] - _ = x[ERMS-41] - _ = x[F16C-42] - _ = x[FMA3-43] - _ = x[FMA4-44] - _ = x[FXSR-45] - _ = x[FXSROPT-46] - _ = x[GFNI-47] - _ = x[HLE-48] - _ = x[HRESET-49] - _ = x[HTT-50] - _ = x[HWA-51] - _ = x[HYPERVISOR-52] - _ = x[IBPB-53] - _ = x[IBS-54] - _ = x[IBSBRNTRGT-55] - _ = x[IBSFETCHSAM-56] - _ = x[IBSFFV-57] - _ = x[IBSOPCNT-58] - _ = x[IBSOPCNTEXT-59] - _ = x[IBSOPSAM-60] - _ = x[IBSRDWROPCNT-61] - _ = x[IBSRIPINVALIDCHK-62] - _ = x[IBS_PREVENTHOST-63] - _ = x[INT_WBINVD-64] - _ = x[INVLPGB-65] - _ = x[LAHF-66] - _ = x[LAM-67] - _ = x[LBRVIRT-68] - _ = x[LZCNT-69] - _ = x[MCAOVERFLOW-70] - _ = x[MCOMMIT-71] - _ = x[MMX-72] - _ = x[MMXEXT-73] - _ = x[MOVBE-74] - _ = x[MOVDIR64B-75] - _ = x[MOVDIRI-76] - _ = x[MOVSB_ZL-77] - _ = x[MPX-78] - _ = x[MSRIRC-79] - _ = x[MSR_PAGEFLUSH-80] - _ = x[NRIPS-81] - _ = x[NX-82] - _ = x[OSXSAVE-83] - _ = x[PCONFIG-84] - _ = x[POPCNT-85] - _ = x[RDPRU-86] - _ = x[RDRAND-87] - _ = x[RDSEED-88] - _ = x[RDTSCP-89] - _ = x[RTM-90] - _ = x[RTM_ALWAYS_ABORT-91] - _ = x[SCE-92] - _ = x[SERIALIZE-93] - _ = x[SEV-94] - _ = x[SEV_64BIT-95] - _ = x[SEV_ALTERNATIVE-96] - _ = x[SEV_DEBUGSWAP-97] - _ = x[SEV_ES-98] - _ = x[SEV_RESTRICTED-99] - _ = x[SEV_SNP-100] - _ = x[SGX-101] - _ = x[SGXLC-102] - _ = x[SHA-103] - _ = x[SME-104] - _ = x[SME_COHERENT-105] - _ = x[SSE-106] - _ = x[SSE2-107] - _ = x[SSE3-108] - _ = x[SSE4-109] - _ = x[SSE42-110] - _ = x[SSE4A-111] - _ = x[SSSE3-112] - _ = x[STIBP-113] - _ = x[STOSB_SHORT-114] - _ = x[SUCCOR-115] - _ = x[SVM-116] - _ = x[SVMDA-117] - _ = x[SVMFBASID-118] - _ = x[SVML-119] - _ = x[SVMNP-120] - _ = x[SVMPF-121] - _ = x[SVMPFT-122] - _ = x[TBM-123] - _ = x[TME-124] - _ = x[TSCRATEMSR-125] - _ = x[TSXLDTRK-126] - _ = x[VAES-127] - 
_ = x[VMCBCLEAN-128] - _ = x[VMPL-129] - _ = x[VMSA_REGPROT-130] - _ = x[VMX-131] - _ = x[VPCLMULQDQ-132] - _ = x[VTE-133] - _ = x[WAITPKG-134] - _ = x[WBNOINVD-135] - _ = x[X87-136] - _ = x[XGETBV1-137] - _ = x[XOP-138] - _ = x[XSAVE-139] - _ = x[XSAVEC-140] - _ = x[XSAVEOPT-141] - _ = x[XSAVES-142] - _ = x[AESARM-143] - _ = x[ARMCPUID-144] - _ = x[ASIMD-145] - _ = x[ASIMDDP-146] - _ = x[ASIMDHP-147] - _ = x[ASIMDRDM-148] - _ = x[ATOMICS-149] - _ = x[CRC32-150] - _ = x[DCPOP-151] - _ = x[EVTSTRM-152] - _ = x[FCMA-153] - _ = x[FP-154] - _ = x[FPHP-155] - _ = x[GPA-156] - _ = x[JSCVT-157] - _ = x[LRCPC-158] - _ = x[PMULL-159] - _ = x[SHA1-160] - _ = x[SHA2-161] - _ = x[SHA3-162] - _ = x[SHA512-163] - _ = x[SM3-164] - _ = x[SM4-165] - _ = x[SVE-166] - _ = x[lastID-167] + _ = x[AMXFP16-6] + _ = x[AMXINT8-7] + _ = x[AMXTILE-8] + _ = x[AVX-9] + _ = x[AVX2-10] + _ = x[AVX512BF16-11] + _ = x[AVX512BITALG-12] + _ = x[AVX512BW-13] + _ = x[AVX512CD-14] + _ = x[AVX512DQ-15] + _ = x[AVX512ER-16] + _ = x[AVX512F-17] + _ = x[AVX512FP16-18] + _ = x[AVX512IFMA-19] + _ = x[AVX512PF-20] + _ = x[AVX512VBMI-21] + _ = x[AVX512VBMI2-22] + _ = x[AVX512VL-23] + _ = x[AVX512VNNI-24] + _ = x[AVX512VP2INTERSECT-25] + _ = x[AVX512VPOPCNTDQ-26] + _ = x[AVXIFMA-27] + _ = x[AVXNECONVERT-28] + _ = x[AVXSLOW-29] + _ = x[AVXVNNI-30] + _ = x[AVXVNNIINT8-31] + _ = x[BMI1-32] + _ = x[BMI2-33] + _ = x[CETIBT-34] + _ = x[CETSS-35] + _ = x[CLDEMOTE-36] + _ = x[CLMUL-37] + _ = x[CLZERO-38] + _ = x[CMOV-39] + _ = x[CMPCCXADD-40] + _ = x[CMPSB_SCADBS_SHORT-41] + _ = x[CMPXCHG8-42] + _ = x[CPBOOST-43] + _ = x[CPPC-44] + _ = x[CX16-45] + _ = x[EFER_LMSLE_UNS-46] + _ = x[ENQCMD-47] + _ = x[ERMS-48] + _ = x[F16C-49] + _ = x[FLUSH_L1D-50] + _ = x[FMA3-51] + _ = x[FMA4-52] + _ = x[FP128-53] + _ = x[FP256-54] + _ = x[FSRM-55] + _ = x[FXSR-56] + _ = x[FXSROPT-57] + _ = x[GFNI-58] + _ = x[HLE-59] + _ = x[HRESET-60] + _ = x[HTT-61] + _ = x[HWA-62] + _ = x[HYBRID_CPU-63] + _ = x[HYPERVISOR-64] + _ = x[IA32_ARCH_CAP-65] + _ = x[IA32_CORE_CAP-66] + _ = x[IBPB-67] + _ = x[IBRS-68] + _ = x[IBRS_PREFERRED-69] + _ = x[IBRS_PROVIDES_SMP-70] + _ = x[IBS-71] + _ = x[IBSBRNTRGT-72] + _ = x[IBSFETCHSAM-73] + _ = x[IBSFFV-74] + _ = x[IBSOPCNT-75] + _ = x[IBSOPCNTEXT-76] + _ = x[IBSOPSAM-77] + _ = x[IBSRDWROPCNT-78] + _ = x[IBSRIPINVALIDCHK-79] + _ = x[IBS_FETCH_CTLX-80] + _ = x[IBS_OPDATA4-81] + _ = x[IBS_OPFUSE-82] + _ = x[IBS_PREVENTHOST-83] + _ = x[IBS_ZEN4-84] + _ = x[INT_WBINVD-85] + _ = x[INVLPGB-86] + _ = x[LAHF-87] + _ = x[LAM-88] + _ = x[LBRVIRT-89] + _ = x[LZCNT-90] + _ = x[MCAOVERFLOW-91] + _ = x[MCDT_NO-92] + _ = x[MCOMMIT-93] + _ = x[MD_CLEAR-94] + _ = x[MMX-95] + _ = x[MMXEXT-96] + _ = x[MOVBE-97] + _ = x[MOVDIR64B-98] + _ = x[MOVDIRI-99] + _ = x[MOVSB_ZL-100] + _ = x[MOVU-101] + _ = x[MPX-102] + _ = x[MSRIRC-103] + _ = x[MSR_PAGEFLUSH-104] + _ = x[NRIPS-105] + _ = x[NX-106] + _ = x[OSXSAVE-107] + _ = x[PCONFIG-108] + _ = x[POPCNT-109] + _ = x[PPIN-110] + _ = x[PREFETCHI-111] + _ = x[PSFD-112] + _ = x[RDPRU-113] + _ = x[RDRAND-114] + _ = x[RDSEED-115] + _ = x[RDTSCP-116] + _ = x[RTM-117] + _ = x[RTM_ALWAYS_ABORT-118] + _ = x[SERIALIZE-119] + _ = x[SEV-120] + _ = x[SEV_64BIT-121] + _ = x[SEV_ALTERNATIVE-122] + _ = x[SEV_DEBUGSWAP-123] + _ = x[SEV_ES-124] + _ = x[SEV_RESTRICTED-125] + _ = x[SEV_SNP-126] + _ = x[SGX-127] + _ = x[SGXLC-128] + _ = x[SHA-129] + _ = x[SME-130] + _ = x[SME_COHERENT-131] + _ = x[SPEC_CTRL_SSBD-132] + _ = x[SRBDS_CTRL-133] + _ = x[SSE-134] + _ = x[SSE2-135] + _ = x[SSE3-136] + _ = x[SSE4-137] + _ = x[SSE42-138] + _ = 
x[SSE4A-139] + _ = x[SSSE3-140] + _ = x[STIBP-141] + _ = x[STIBP_ALWAYSON-142] + _ = x[STOSB_SHORT-143] + _ = x[SUCCOR-144] + _ = x[SVM-145] + _ = x[SVMDA-146] + _ = x[SVMFBASID-147] + _ = x[SVML-148] + _ = x[SVMNP-149] + _ = x[SVMPF-150] + _ = x[SVMPFT-151] + _ = x[SYSCALL-152] + _ = x[SYSEE-153] + _ = x[TBM-154] + _ = x[TLB_FLUSH_NESTED-155] + _ = x[TME-156] + _ = x[TOPEXT-157] + _ = x[TSCRATEMSR-158] + _ = x[TSXLDTRK-159] + _ = x[VAES-160] + _ = x[VMCBCLEAN-161] + _ = x[VMPL-162] + _ = x[VMSA_REGPROT-163] + _ = x[VMX-164] + _ = x[VPCLMULQDQ-165] + _ = x[VTE-166] + _ = x[WAITPKG-167] + _ = x[WBNOINVD-168] + _ = x[X87-169] + _ = x[XGETBV1-170] + _ = x[XOP-171] + _ = x[XSAVE-172] + _ = x[XSAVEC-173] + _ = x[XSAVEOPT-174] + _ = x[XSAVES-175] + _ = x[AESARM-176] + _ = x[ARMCPUID-177] + _ = x[ASIMD-178] + _ = x[ASIMDDP-179] + _ = x[ASIMDHP-180] + _ = x[ASIMDRDM-181] + _ = x[ATOMICS-182] + _ = x[CRC32-183] + _ = x[DCPOP-184] + _ = x[EVTSTRM-185] + _ = x[FCMA-186] + _ = x[FP-187] + _ = x[FPHP-188] + _ = x[GPA-189] + _ = x[JSCVT-190] + _ = x[LRCPC-191] + _ = x[PMULL-192] + _ = x[SHA1-193] + _ = x[SHA2-194] + _ = x[SHA3-195] + _ = x[SHA512-196] + _ = x[SM3-197] + _ = x[SM4-198] + _ = x[SVE-199] + _ = x[lastID-200] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4INT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 278, 282, 288, 293, 301, 306, 312, 316, 325, 343, 351, 358, 362, 366, 380, 386, 390, 394, 403, 407, 411, 416, 421, 425, 429, 436, 440, 443, 449, 452, 455, 465, 475, 488, 501, 505, 509, 523, 540, 543, 553, 564, 570, 578, 589, 597, 609, 625, 639, 650, 660, 675, 683, 693, 700, 704, 707, 714, 719, 730, 737, 744, 752, 755, 761, 766, 775, 782, 790, 794, 797, 803, 816, 821, 823, 830, 837, 843, 847, 856, 860, 865, 871, 877, 883, 886, 902, 911, 914, 923, 938, 951, 957, 971, 978, 981, 986, 989, 992, 1004, 1018, 1028, 1031, 1035, 1039, 1043, 1048, 1053, 1058, 1063, 1077, 1088, 1094, 1097, 1102, 1111, 1115, 1120, 1125, 1131, 1138, 1143, 1146, 1162, 1165, 1171, 1181, 1189, 1193, 1202, 1206, 1218, 1221, 1231, 1234, 1241, 1249, 1252, 1259, 1262, 1267, 1273, 1281, 1287, 1293, 1301, 1306, 1313, 1320, 1328, 1335, 1340, 1345, 1352, 1356, 1358, 1362, 1365, 1370, 1375, 1380, 1384, 1388, 1392, 1398, 1401, 1404, 1407, 1413} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git 
a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go index d91d02109..84b1acd21 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -83,7 +83,7 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) { c.Model = sysctlGetInt(0, "machdep.cpu.model") c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize") c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") - c.Cache.L1D = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize") c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 39bbcf00f..d569c0c94 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,5 +1,5 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine -// +build darwin freebsd openbsd netbsd dragonfly +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine package isatty diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE index 55f12ab77..852ab9ced 100644 --- a/vendor/github.com/miekg/dns/LICENSE +++ b/vendor/github.com/miekg/dns/LICENSE @@ -1,30 +1,29 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +BSD 3-Clause License + +Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben. +All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
-As this is fork of the official Go code the same license applies. -Extensions of the original work are copyright (c) 2011 Miek Gieben +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 5a799d88f..ec9540392 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -77,6 +77,10 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://ping.sx/dig * https://fleetdeck.io/ * https://github.com/markdingo/autoreverse +* https://github.com/slackhq/nebula +* https://github.com/dnschecktool/dow-proxy +* https://dnscheck.tools/ +* https://github.com/egbakou/domainverifier Send pull request if you want to be listed here. @@ -140,6 +144,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY * 3597 - Unknown RRs +* 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions * 4255 - SSHFP record * 4343 - Case insensitivity @@ -175,6 +180,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 8080 - EdDSA for DNSSEC * 8499 - DNS Terminology * 8659 - DNS Certification Authority Authorization (CAA) Resource Record +* 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery * 8914 - Extended DNS Errors * 8976 - Message Digest for DNS Zones (ZONEMD RR) diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go index ac479db95..ab2812e33 100644 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -19,7 +19,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction // * has more than 0 RRs in the Authority section // // * has more than 2 RRs in the Additional section -// var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc // MsgAcceptAction represents the action to be taken. diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index 9aa658530..f694ea589 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -185,7 +185,7 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er // that entails when using "tcp" and especially "tcp-tls" clients. // // When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to -// prevent one cancelation from canceling all outstanding requests. +// prevent one cancellation from canceling all outstanding requests. 
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { return c.exchangeWithConnContext(context.Background(), m, conn) } @@ -198,7 +198,7 @@ func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn q := m.Question[0] key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { - // When we're doing singleflight we don't want one context cancelation, cancel _all_ outstanding queries. + // When we're doing singleflight we don't want one context cancellation, cancel _all_ outstanding queries. // Hence we ignore the context and use Background(). return c.exchangeContext(context.Background(), m, conn) }) @@ -431,7 +431,6 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) // co.WriteMsg(m) // in, _ := co.ReadMsg() // co.Close() -// func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { println("dns: ExchangeConn: this function is deprecated") co := new(Conn) diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go index f00f5722c..586ab6917 100644 --- a/vendor/github.com/miekg/dns/doc.go +++ b/vendor/github.com/miekg/dns/doc.go @@ -13,28 +13,28 @@ names in a message will result in a packing failure. Resource records are native types. They are not stored in wire format. Basic usage pattern for creating a new resource record: - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." Or directly from a string: - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") Or when the default origin (.) and TTL (3600) and class (IN) suit you: - mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") Or even: - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") In the DNS messages are exchanged, these messages contain resource records (sets). Use pattern for creating a message: - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) Or when not certain if the domain name is fully qualified: @@ -45,17 +45,17 @@ records for the miek.nl. zone. The following is slightly more verbose, but more flexible: - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} After creating a message it can be sent. 
Basic use pattern for synchronous querying the DNS at a server configured on 127.0.0.1 and port 53: - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") Suppressing multiple outstanding queries (with the same question, type and class) is as easy as setting: @@ -72,7 +72,7 @@ and port to use for the connection: Port: 12345, Zone: "", } - c.Dialer := &net.Dialer{ + c.Dialer = &net.Dialer{ Timeout: 200 * time.Millisecond, LocalAddr: &laddr, } @@ -96,7 +96,7 @@ the Answer section: // do something with t.Txt } -Domain Name and TXT Character String Representations +# Domain Name and TXT Character String Representations Both domain names and TXT character strings are converted to presentation form both when unpacked and when converted to strings. @@ -108,7 +108,7 @@ be escaped. Bytes below 32 and above 127 will be converted to \DDD form. For domain names, in addition to the above rules brackets, periods, spaces, semicolons and the at symbol are escaped. -DNSSEC +# DNSSEC DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses public key cryptography to sign resource records. The public keys are stored in @@ -117,12 +117,12 @@ DNSKEY records and the signatures in RRSIG records. Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit to a request. - m := new(dns.Msg) - m.SetEdns0(4096, true) + m := new(dns.Msg) + m.SetEdns0(4096, true) Signature generation, signature verification and key generation are all supported. -DYNAMIC UPDATES +# DYNAMIC UPDATES Dynamic updates reuses the DNS message format, but renames three of the sections. Question is Zone, Answer is Prerequisite, Authority is Update, only @@ -133,30 +133,30 @@ certain resource records or names in a zone to specify if resource records should be added or removed. The table from RFC 2136 supplemented with the Go DNS function shows which functions exist to specify the prerequisites. - 3.2.4 - Table Of Metavalues Used In Prerequisite Section + 3.2.4 - Table Of Metavalues Used In Prerequisite Section - CLASS TYPE RDATA Meaning Function - -------------------------------------------------------------- - ANY ANY empty Name is in use dns.NameUsed - ANY rrset empty RRset exists (value indep) dns.RRsetUsed - NONE ANY empty Name is not in use dns.NameNotUsed - NONE rrset empty RRset does not exist dns.RRsetNotUsed - zone rrset rr RRset exists (value dep) dns.Used + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used The prerequisite section can also be left empty. If you have decided on the prerequisites you can tell what RRs should be added or deleted. The next table shows the options you have and what functions to call. 
- 3.4.2.6 - Table Of Metavalues Used In Update Section + 3.4.2.6 - Table Of Metavalues Used In Update Section - CLASS TYPE RDATA Meaning Function - --------------------------------------------------------------- - ANY ANY empty Delete all RRsets from name dns.RemoveName - ANY rrset empty Delete an RRset dns.RemoveRRset - NONE rrset rr Delete an RR from RRset dns.Remove - zone rrset rr Add to an RRset dns.Insert + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert -TRANSACTION SIGNATURE +# TRANSACTION SIGNATURE An TSIG or transaction signature adds a HMAC TSIG record to each message sent. The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512. @@ -239,7 +239,7 @@ Basic use pattern validating and replying to a message that has TSIG set. w.WriteMsg(m) } -PRIVATE RRS +# PRIVATE RRS RFC 6895 sets aside a range of type codes for private use. This range is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these @@ -248,7 +248,7 @@ can be used, before requesting an official type code from IANA. See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more information. -EDNS0 +# EDNS0 EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by RFC 6891. It defines a new RR type, the OPT RR, which is then completely @@ -279,9 +279,9 @@ SIG(0) From RFC 2931: - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256, diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 14568c2e9..1c0677c82 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -78,7 +78,10 @@ func (rr *OPT) String() string { if rr.Do() { s += "flags: do; " } else { - s += "flags: ; " + s += "flags:; " + } + if rr.Hdr.Ttl&0x7FFF != 0 { + s += fmt.Sprintf("MBZ: 0x%04x, ", rr.Hdr.Ttl&0x7FFF) } s += "udp: " + strconv.Itoa(int(rr.UDPSize())) @@ -98,6 +101,8 @@ func (rr *OPT) String() string { s += "\n; SUBNET: " + o.String() case *EDNS0_COOKIE: s += "\n; COOKIE: " + o.String() + case *EDNS0_EXPIRE: + s += "\n; EXPIRE: " + o.String() case *EDNS0_TCP_KEEPALIVE: s += "\n; KEEPALIVE: " + o.String() case *EDNS0_UL: @@ -258,7 +263,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} // o.Hdr.Name = "." 
// o.Hdr.Rrtype = dns.TypeOPT // e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET +// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt) // e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 // e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 // e.SourceScope = 0 diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go index 57410acda..505ae4308 100644 --- a/vendor/github.com/miekg/dns/fuzz.go +++ b/vendor/github.com/miekg/dns/fuzz.go @@ -1,3 +1,4 @@ +//go:build fuzz // +build fuzz package dns diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go index f9faacfeb..cd498d2e9 100644 --- a/vendor/github.com/miekg/dns/labels.go +++ b/vendor/github.com/miekg/dns/labels.go @@ -122,7 +122,7 @@ func Split(s string) []int { } // NextLabel returns the index of the start of the next label in the -// string s starting at offset. +// string s starting at offset. A negative offset will cause a panic. // The bool end is true when the end of the string has been reached. // Also see PrevLabel. func NextLabel(s string, offset int) (i int, end bool) { diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go index b9201417a..65ac91021 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_reuseport.go @@ -1,3 +1,4 @@ +//go:build !go1.11 || (!aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd) // +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package dns diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_reuseport.go index fad195cfe..89e6c98bc 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_reuseport.go @@ -1,3 +1,4 @@ +//go:build go1.11 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd) // +build go1.11 // +build aix darwin dragonfly freebsd linux netbsd openbsd diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 89ebb64ab..f4d334e4f 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -680,9 +680,9 @@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) // Convert a MsgHdr to a string, with dig-like headers: // -//;; opcode: QUERY, status: NOERROR, id: 48404 +// ;; opcode: QUERY, status: NOERROR, id: 48404 // -//;; flags: qr aa rd ra; +// ;; flags: qr aa rd ra; func (h *MsgHdr) String() string { if h == nil { return " MsgHdr" diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index ea2035cd2..42d5cd535 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -810,3 +810,37 @@ func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) { Network: ipnet, }, off, nil } + +func unpackIPSECGateway(msg []byte, off int, gatewayType uint8) (net.IP, string, int, error) { + var retAddr net.IP + var retString string + var err error + + switch gatewayType { + case IPSECGatewayNone: // do nothing + case IPSECGatewayIPv4: + retAddr, off, err = unpackDataA(msg, off) + case IPSECGatewayIPv6: + retAddr, off, err = unpackDataAAAA(msg, off) + case IPSECGatewayHost: + retString, off, err = UnpackDomainName(msg, off) + } + + return retAddr, retString, off, err +} + +func packIPSECGateway(gatewayAddr net.IP, 
gatewayString string, msg []byte, off int, gatewayType uint8, compression compressionMap, compress bool) (int, error) { + var err error + + switch gatewayType { + case IPSECGatewayNone: // do nothing + case IPSECGatewayIPv4: + off, err = packDataA(gatewayAddr, msg, off) + case IPSECGatewayIPv6: + off, err = packDataAAAA(gatewayAddr, msg, off) + case IPSECGatewayHost: + off, err = packDomainName(gatewayString, msg, off, compression, compress) + } + + return off, err +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index e398484da..68dbff690 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -3,6 +3,7 @@ package dns import ( "bytes" "encoding/base64" + "errors" "net" "strconv" "strings" @@ -1216,6 +1217,117 @@ func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") } func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") } +func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + num, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad IPSECKEY value", l} + } + rr.Precedence = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad IPSECKEY value", l} + } + rr.GatewayType = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad IPSECKEY value", l} + } + rr.Algorithm = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err { + return &ParseError{"", "bad IPSECKEY gateway", l} + } + + rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType) + if err != nil { + return &ParseError{"", "IPSECKEY " + err.Error(), l} + } + + c.Next() // zBlank + + s, pErr := endingToString(c, "bad IPSECKEY PublicKey") + if pErr != nil { + return pErr + } + rr.PublicKey = s + return slurpRemainder(c) +} + +func (rr *AMTRELAY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + num, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad AMTRELAY value", l} + } + rr.Precedence = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err || !(l.token == "0" || l.token == "1") { + return &ParseError{"", "bad discovery value", l} + } + if l.token == "1" { + rr.GatewayType = 0x80 + } + + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad AMTRELAY value", l} + } + rr.GatewayType |= uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err { + return &ParseError{"", "bad AMTRELAY gateway", l} + } + + rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType&0x7f) + if err != nil { + return &ParseError{"", "AMTRELAY " + err.Error(), l} + } + + return slurpRemainder(c) +} + +// same constants and parsing between IPSECKEY and AMTRELAY +func parseAddrHostUnion(token, o string, gatewayType uint8) (addr net.IP, host string, err error) { + switch gatewayType { + case IPSECGatewayNone: + if token != "." 
{ + return addr, host, errors.New("gateway type none with gateway set") + } + case IPSECGatewayIPv4, IPSECGatewayIPv6: + addr = net.ParseIP(token) + if addr == nil { + return addr, host, errors.New("gateway IP invalid") + } + if (addr.To4() == nil) == (gatewayType == IPSECGatewayIPv4) { + return addr, host, errors.New("gateway IP family mismatch") + } + case IPSECGatewayHost: + var ok bool + host, ok = toAbsoluteName(token, o) + if !ok { + return addr, host, errors.New("invalid gateway host") + } + } + + return addr, host, nil +} + func (rr *RKEY) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 4e5a9aa8a..508e9cb37 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -18,7 +18,7 @@ import ( const maxTCPQueries = 128 // aLongTimeAgo is a non-zero time, far in the past, used for -// immediate cancelation of network operations. +// immediate cancellation of network operations. var aLongTimeAgo = time.Unix(1, 0) // Handler is implemented by any value that implements ServeDNS. diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index ea58710da..e17132369 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -353,7 +353,7 @@ func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN } func (s *SVCBAlpn) String() string { // An ALPN value is a comma-separated list of values, each of which can be // an arbitrary binary value. In order to allow parsing, the comma and - // backslash characters are themselves excaped. + // backslash characters are themselves escaped. // // However, this escaping is done in addition to the normal escaping which // happens in zone files, meaning that these values must be @@ -563,15 +563,15 @@ func (s *SVCBPort) parse(b string) error { // to the hinted IP address may be terminated and a new connection may be opened. 
// Basic use pattern for creating an ipv4hint option: // -// h := new(dns.HTTPS) -// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} -// e := new(dns.SVCBIPv4Hint) -// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBIPv4Hint) +// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} // -// Or +// Or // -// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} -// h.Value = append(h.Value, e) +// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} +// h.Value = append(h.Value, e) type SVCBIPv4Hint struct { Hint []net.IP } diff --git a/vendor/github.com/miekg/dns/tools.go b/vendor/github.com/miekg/dns/tools.go index d11182536..ccf8f6bfc 100644 --- a/vendor/github.com/miekg/dns/tools.go +++ b/vendor/github.com/miekg/dns/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // We include our tool dependencies for `go generate` here to ensure they're diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index d9becb67c..a34ab602f 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -65,6 +65,7 @@ const ( TypeAPL uint16 = 42 TypeDS uint16 = 43 TypeSSHFP uint16 = 44 + TypeIPSECKEY uint16 = 45 TypeRRSIG uint16 = 46 TypeNSEC uint16 = 47 TypeDNSKEY uint16 = 48 @@ -98,6 +99,7 @@ const ( TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC uint16 = 258 + TypeAMTRELAY uint16 = 260 TypeTKEY uint16 = 249 TypeTSIG uint16 = 250 @@ -159,6 +161,22 @@ const ( ZoneMDHashAlgSHA512 = 2 ) +// Used in IPSEC https://datatracker.ietf.org/doc/html/rfc4025#section-2.3 +const ( + IPSECGatewayNone uint8 = iota + IPSECGatewayIPv4 + IPSECGatewayIPv6 + IPSECGatewayHost +) + +// Used in AMTRELAY https://datatracker.ietf.org/doc/html/rfc8777#section-4.2.3 +const ( + AMTRELAYNone = IPSECGatewayNone + AMTRELAYIPv4 = IPSECGatewayIPv4 + AMTRELAYIPv6 = IPSECGatewayIPv6 + AMTRELAYHost = IPSECGatewayHost +) + // Header is the wire format for the DNS packet header. type Header struct { Id uint16 @@ -994,6 +1012,69 @@ func (rr *DNSKEY) String() string { " " + rr.PublicKey } +// IPSECKEY RR. See RFC 4025. +type IPSECKEY struct { + Hdr RR_Header + Precedence uint8 + GatewayType uint8 + Algorithm uint8 + GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost + GatewayHost string `dns:"ipsechost"` + PublicKey string `dns:"base64"` +} + +func (rr *IPSECKEY) String() string { + var gateway string + switch rr.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + gateway = rr.GatewayAddr.String() + case IPSECGatewayHost: + gateway = rr.GatewayHost + case IPSECGatewayNone: + fallthrough + default: + gateway = "." + } + + return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + " " + strconv.Itoa(int(rr.GatewayType)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + gateway + + " " + rr.PublicKey +} + +// AMTRELAY RR. See RFC 8777. +type AMTRELAY struct { + Hdr RR_Header + Precedence uint8 + GatewayType uint8 // discovery is packed in here at bit 0x80 + GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost + GatewayHost string `dns:"amtrelayhost"` +} + +func (rr *AMTRELAY) String() string { + var gateway string + switch rr.GatewayType & 0x7f { + case AMTRELAYIPv4, AMTRELAYIPv6: + gateway = rr.GatewayAddr.String() + case AMTRELAYHost: + gateway = rr.GatewayHost + case AMTRELAYNone: + fallthrough + default: + gateway = "." 
+ } + boolS := "0" + if rr.GatewayType&0x80 == 0x80 { + boolS = "1" + } + + return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + " " + boolS + + " " + strconv.Itoa(int(rr.GatewayType&0x7f)) + + " " + gateway +} + // RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. type RKEY struct { Hdr RR_Header diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go index a4826ee2f..c018ad43d 100644 --- a/vendor/github.com/miekg/dns/udp.go +++ b/vendor/github.com/miekg/dns/udp.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package dns diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go index e7dd8ca31..8dffc4bd0 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package dns diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index b1a872bd5..556022216 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 50} +var Version = v{1, 1, 51} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 1917e91c8..0a831c880 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -44,7 +44,6 @@ func (t *Transfer) tsigProvider() TsigProvider { // dnscon := &dns.Conn{Conn:con} // transfer = &dns.Transfer{Conn: dnscon} // channel, err := transfer.In(message, master) -// func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { switch q.Question[0].Qtype { case TypeAXFR, TypeIXFR: diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 9eb1dac29..450bbbc29 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -43,6 +43,32 @@ func (r1 *AFSDB) isDuplicate(_r2 RR) bool { return true } +func (r1 *AMTRELAY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AMTRELAY) + if !ok { + return false + } + _ = r2 + if r1.Precedence != r2.Precedence { + return false + } + if r1.GatewayType != r2.GatewayType { + return false + } + switch r1.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + if !r1.GatewayAddr.Equal(r2.GatewayAddr) { + return false + } + case IPSECGatewayHost: + if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) { + return false + } + } + + return true +} + func (r1 *ANY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*ANY) if !ok { @@ -423,6 +449,38 @@ func (r1 *HTTPS) isDuplicate(_r2 RR) bool { return true } +func (r1 *IPSECKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*IPSECKEY) + if !ok { + return false + } + _ = r2 + if r1.Precedence != r2.Precedence { + return false + } + if r1.GatewayType != r2.GatewayType { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + switch r1.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + if !r1.GatewayAddr.Equal(r2.GatewayAddr) { + return false + } + case IPSECGatewayHost: + if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) { + return false + } + } + + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + func (r1 *KEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*KEY) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go 
b/vendor/github.com/miekg/dns/zmsg.go index fc0822f98..3ea0eb423 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -32,6 +32,22 @@ func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *AMTRELAY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Precedence, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.GatewayType, msg, off) + if err != nil { + return off, err + } + off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false) + if err != nil { + return off, err + } + return off, nil +} + func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { return off, nil } @@ -332,6 +348,30 @@ func (rr *HTTPS) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *IPSECKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Precedence, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.GatewayType, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { @@ -1180,6 +1220,34 @@ func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *AMTRELAY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Precedence, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.GatewayType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + if off == len(msg) { + return off, nil + } + rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType) + if err != nil { + return off, err + } + return off, nil +} + func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart @@ -1636,6 +1704,48 @@ func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Precedence, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.GatewayType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + if off == len(msg) { + return off, nil + } + rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil 
+} + func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 5d060cfee..17543ea47 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -12,6 +12,7 @@ var TypeToRR = map[uint16]func() RR{ TypeA: func() RR { return new(A) }, TypeAAAA: func() RR { return new(AAAA) }, TypeAFSDB: func() RR { return new(AFSDB) }, + TypeAMTRELAY: func() RR { return new(AMTRELAY) }, TypeANY: func() RR { return new(ANY) }, TypeAPL: func() RR { return new(APL) }, TypeAVC: func() RR { return new(AVC) }, @@ -34,6 +35,7 @@ var TypeToRR = map[uint16]func() RR{ TypeHINFO: func() RR { return new(HINFO) }, TypeHIP: func() RR { return new(HIP) }, TypeHTTPS: func() RR { return new(HTTPS) }, + TypeIPSECKEY: func() RR { return new(IPSECKEY) }, TypeKEY: func() RR { return new(KEY) }, TypeKX: func() RR { return new(KX) }, TypeL32: func() RR { return new(L32) }, @@ -90,6 +92,7 @@ var TypeToString = map[uint16]string{ TypeA: "A", TypeAAAA: "AAAA", TypeAFSDB: "AFSDB", + TypeAMTRELAY: "AMTRELAY", TypeANY: "ANY", TypeAPL: "APL", TypeATMA: "ATMA", @@ -114,6 +117,7 @@ var TypeToString = map[uint16]string{ TypeHINFO: "HINFO", TypeHIP: "HIP", TypeHTTPS: "HTTPS", + TypeIPSECKEY: "IPSECKEY", TypeISDN: "ISDN", TypeIXFR: "IXFR", TypeKEY: "KEY", @@ -176,6 +180,7 @@ var TypeToString = map[uint16]string{ func (rr *A) Header() *RR_Header { return &rr.Hdr } func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *AMTRELAY) Header() *RR_Header { return &rr.Hdr } func (rr *ANY) Header() *RR_Header { return &rr.Hdr } func (rr *APL) Header() *RR_Header { return &rr.Hdr } func (rr *AVC) Header() *RR_Header { return &rr.Hdr } @@ -198,6 +203,7 @@ func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } func (rr *HIP) Header() *RR_Header { return &rr.Hdr } func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr } +func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr } func (rr *KEY) Header() *RR_Header { return &rr.Hdr } func (rr *KX) Header() *RR_Header { return &rr.Hdr } func (rr *L32) Header() *RR_Header { return &rr.Hdr } @@ -270,6 +276,20 @@ func (rr *AFSDB) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Hostname, off+l, compression, false) return l } +func (rr *AMTRELAY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Precedence + l++ // GatewayType + switch rr.GatewayType { + case AMTRELAYIPv4: + l += net.IPv4len + case AMTRELAYIPv6: + l += net.IPv6len + case AMTRELAYHost: + l += len(rr.GatewayHost) + 1 + } + return l +} func (rr *ANY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) return l @@ -379,6 +399,22 @@ func (rr *HIP) len(off int, compression map[string]struct{}) int { } return l } +func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Precedence + l++ // GatewayType + l++ // Algorithm + switch rr.GatewayType { + case IPSECGatewayIPv4: + l += net.IPv4len + case IPSECGatewayIPv6: + l += net.IPv6len + case IPSECGatewayHost: + l += len(rr.GatewayHost) + 1 + } + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} func (rr *KX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -706,6 +742,9 @@ func 
(rr *AAAA) copy() RR { func (rr *AFSDB) copy() RR { return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} } +func (rr *AMTRELAY) copy() RR { + return &AMTRELAY{rr.Hdr, rr.Precedence, rr.GatewayType, copyIP(rr.GatewayAddr), rr.GatewayHost} +} func (rr *ANY) copy() RR { return &ANY{rr.Hdr} } @@ -782,6 +821,9 @@ func (rr *HIP) copy() RR { func (rr *HTTPS) copy() RR { return &HTTPS{*rr.SVCB.copy().(*SVCB)} } +func (rr *IPSECKEY) copy() RR { + return &IPSECKEY{rr.Hdr, rr.Precedence, rr.GatewayType, rr.Algorithm, copyIP(rr.GatewayAddr), rr.GatewayHost, rr.PublicKey} +} func (rr *KEY) copy() RR { return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} } diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore index 8081bd0ff..8ae0384eb 100644 --- a/vendor/github.com/minio/minio-go/v7/.gitignore +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -1,4 +1,6 @@ *~ *.test validator -golangci-lint \ No newline at end of file +golangci-lint +functional_tests +.idea \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index dfc0c2d53..875b949c6 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -12,8 +12,7 @@ linters: - govet - ineffassign - gosimple - - deadcode - - structcheck + - unused - gocritic issues: @@ -25,3 +24,4 @@ issues: - "captLocal:" - "ifElseChain:" - "elseif:" + - "should have a package comment" diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index ac4a328f0..40d57c130 100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -9,13 +9,15 @@ checks: lint vet test examples functional-test lint: @mkdir -p ${GOPATH}/bin - @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2 + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin @echo "Running $@ check" @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml vet: @GO111MODULE=on go vet ./... + @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest + ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005" test: @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... 
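The miekg/dns hunks earlier in this patch wire the new IPSECKEY (RFC 4025) and AMTRELAY (RFC 8777) record types through the whole stack: zone-file parsing, wire packing and unpacking, String(), copy(), and duplicate detection. A minimal round-trip sketch of the new type, assuming the v1.1.51 API vendored by this patch; the record text is the example from RFC 4025:

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		// RFC 4025 example record: precedence 10, gateway type 1 (IPv4),
		// algorithm 2 (RSA), the gateway address, then the base64 public key.
		rr, err := dns.NewRR("38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==")
		if err != nil {
			log.Fatal(err)
		}
		key := rr.(*dns.IPSECKEY)
		fmt.Println(key.GatewayType == dns.IPSECGatewayIPv4) // true
		fmt.Println(key.GatewayAddr)                         // 192.0.2.38
		fmt.Println(key)                                     // String() reproduces the presentation form
	}

Parsing routes the gateway token through the parseAddrHostUnion helper shown above, so an IPv6 literal combined with gateway type 1 (or the reverse) is rejected with a family-mismatch error.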
@@ -27,7 +29,8 @@ examples: @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) functional-test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests clean: @echo "Cleaning up all the generated files" diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index 4e3abf71d..9b6bbbec3 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -2,7 +2,7 @@ The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). +This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). This document assumes that you have a working [Go development environment](https://golang.org/doc/install). @@ -126,53 +126,53 @@ mc ls play/mymusic/ ## API Reference The full API Reference is available here. -* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API Reference : Bucket notification Operations -* 
[`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) +* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) ### API Reference : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) ### API Reference : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* 
[`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## Full Examples @@ -236,8 +236,8 @@ The full API Reference is available here. * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ## Contribute [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md index 64e793411..97087c61e 100644 --- a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md +++ b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md @@ -14,7 +14,7 @@ MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对 - Ceph Object Gateway - Riak CS -本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。 +本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html)。 本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。 @@ -140,52 +140,52 @@ mc ls play/mymusic/ ## API文档 完整的API文档在这里。 -* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整API文档](https://min.io/docs/minio/linux/developers/go/API.html) ### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* 
[`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展) +* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO 扩展) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO 扩展) ### API文档 : 操作文件对象 -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) ### API文档 : 操作对象 -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* 
[`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API文档 : Presigned操作 -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## 完整示例 @@ -253,8 +253,8 @@ mc ls play/mymusic/ * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## 了解更多 -* [完整文档](https://docs.min.io) -* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整文档](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API文档](https://min.io/docs/minio/linux/developers/go/API.html) ## 贡献 [贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go index 7e2199732..3f88d0777 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -21,7 +21,7 @@ import ( "bytes" "context" "encoding/xml" - "io/ioutil" + "io" "net/http" "net/url" @@ -143,5 +143,5 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b } } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go index e7edf9c97..dbb5259a8 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -18,7 +18,7 @@ package minio import ( "context" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string } } - bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + bucketPolicyBuf, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go index c80488eee..d5895dfe0 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ 
b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -22,7 +22,7 @@ import ( "context" "encoding/json" "encoding/xml" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str if resp.StatusCode != http.StatusOK { return s, httpRespToErrorResponse(resp, bucketName, "") } - respBytes, err := ioutil.ReadAll(resp.Body) + respBytes, err := io.ReadAll(resp.Body) if err != nil { return s, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go index 1615f8f87..86d74298a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -22,7 +22,6 @@ import ( "encoding/xml" "errors" "io" - "io/ioutil" "net/http" "net/url" @@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags return nil, httpRespToErrorResponse(resp, bucketName, "") } - defer io.Copy(ioutil.Discard, resp.Body) + defer io.Copy(io.Discard, resp.Body) return tags.ParseBucketXML(resp.Body) } diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index b59924a3d..1ba68c79e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -221,7 +220,7 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) } if dstOpts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if !dstOpts.Internal.LegalholdTimestamp.IsZero() { headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) @@ -516,7 +515,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs .. return UploadInfo{}, err } if dst.Progress != nil { - io.CopyN(ioutil.Discard, dst.Progress, end-start+1) + io.CopyN(io.Discard, dst.Progress, end-start+1) } objParts = append(objParts, complPart) partIndex++ @@ -525,7 +524,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs .. // 4. Make final complete-multipart request. uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, - completeMultipartUpload{Parts: objParts}, PutObjectOptions{}) + completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption}) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go index 1c0ad2be4..0c95d91ec 100644 --- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -20,7 +20,6 @@ package minio import ( "context" "io" - "io/ioutil" "net/http" ) @@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr // Update the progress properly after successful copy. 
if dst.Progress != nil { - io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size)) + io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size)) } cpObjRes := copyObjectResult{} diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go index 5a8d473c6..1f4793877 100644 --- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -45,19 +45,20 @@ type StringMap map[string]string // on the first line is initialize it. func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { *m = StringMap{} - type xmlMapEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` + type Item struct { + Key string + Value string } for { - var e xmlMapEntry + var e Item err := d.Decode(&e) if err == io.EOF { break - } else if err != nil { + } + if err != nil { return err } - (*m)[e.XMLName.Local] = e.Value + (*m)[e.Key] = e.Value } return nil } @@ -86,6 +87,8 @@ type UploadInfo struct { ExpirationRuleID string // Verified checksum values, if any. + // Values are base64 (standard) encoded. + // For multipart objects this is a checksum of the checksum of each part. ChecksumCRC32 string ChecksumCRC32C string ChecksumSHA1 string @@ -118,7 +121,7 @@ type ObjectInfo struct { Metadata http.Header `json:"metadata" xml:"-"` // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - UserMetadata StringMap `json:"userMetadata"` + UserMetadata StringMap `json:"userMetadata,omitempty"` // x-amz-tagging values in their k/v values. UserTags map[string]string `json:"userTags"` @@ -146,7 +149,8 @@ type ObjectInfo struct { // - FAILED // - REPLICA (on the destination) ReplicationStatus string `xml:"ReplicationStatus"` - + // set to true if delete marker has backing object version on target, and eligible to replicate + ReplicationReady bool // Lifecycle expiry-date and ruleID associated with the expiry // not to be confused with `Expires` HTTP header. Expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index dd781cae3..4ec0c87c2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -22,7 +22,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" ) @@ -67,14 +66,14 @@ type ErrorResponse struct { // // For example: // -// import s3 "github.com/minio/minio-go/v7" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... +// import s3 "github.com/minio/minio-go/v7" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... func ToErrorResponse(err error) ErrorResponse { switch err := err.(type) { case ErrorResponse: @@ -108,7 +107,7 @@ const ( func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { // read the whole body (up to 1MB) const maxBodyLength = 1 << 20 - body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) if err != nil { return nil, err } @@ -253,26 +252,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) } } -// errInvalidBucketName - Invalid bucket name response. 
-func errInvalidBucketName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", - Message: message, - RequestID: "minio", - } -} - -// errInvalidObjectName - Invalid object name response. -func errInvalidObjectName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchKey", - Message: message, - RequestID: "minio", - } -} - // errInvalidArgument - Invalid argument response. func errInvalidArgument(message string) error { return ErrorResponse{ diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index b17f3146b..e31e4cf92 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -23,8 +23,6 @@ import ( "fmt" "io" "net/http" - "net/url" - "strconv" "sync" "github.com/minio/minio-go/v7/pkg/s3utils" @@ -654,19 +652,11 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o return nil, ObjectInfo{}, nil, err } - urlValues := make(url.Values) - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - if opts.PartNumber > 0 { - urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber)) - } - // Execute GET on objectName. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ bucketName: bucketName, objectName: objectName, - queryValues: urlValues, + queryValues: opts.toQueryValues(), customHeader: opts.Header(), contentSHA256Hex: emptySHA256Hex, }) diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 08ae95c42..bb86a5994 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -20,6 +20,8 @@ package minio import ( "fmt" "net/http" + "net/url" + "strconv" "time" "github.com/minio/minio-go/v7/pkg/encrypt" @@ -27,14 +29,16 @@ import ( // AdvancedGetOptions for internal use by MinIO server - not intended for client use. type AdvancedGetOptions struct { - ReplicationDeleteMarker bool - ReplicationProxyRequest string + ReplicationDeleteMarker bool + IsReplicationReadyForDeleteMarker bool + ReplicationProxyRequest string } // GetObjectOptions are used to specify additional headers or options // during GET requests. type GetObjectOptions struct { headers map[string]string + reqParams url.Values ServerSideEncryption encrypt.ServerSide VersionID string PartNumber int @@ -82,6 +86,34 @@ func (o *GetObjectOptions) Set(key, value string) { o.headers[http.CanonicalHeaderKey(key)] = value } +// SetReqParam - set request query string parameter +// supported key: see supportedQueryValues. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) SetReqParam(key, value string) { + if !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Set(key, value) +} + +// AddReqParam - add request query string parameter +// supported key: see supportedQueryValues. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) AddReqParam(key, value string) { + if !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add(key, value) +} + // SetMatchETag - set match etag. 
func (o *GetObjectOptions) SetMatchETag(etag string) error { if etag == "" { @@ -148,3 +180,24 @@ func (o *GetObjectOptions) SetRange(start, end int64) error { } return nil } + +// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters. +func (o *GetObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.VersionID != "" { + urlValues.Set("versionId", o.VersionID) + } + if o.PartNumber > 0 { + urlValues.Set("partNumber", strconv.Itoa(o.PartNumber)) + } + + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index b624b0778..bfb2c1151 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -32,11 +32,10 @@ import ( // This call requires explicit authentication, no anonymous requests are // allowed for listing buckets. // -// api := client.New(....) -// for message := range api.ListBuckets(context.Background()) { -// fmt.Println(message) -// } -// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) @@ -71,21 +70,28 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List // Return object owner information by default fetchOwner := true + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -99,9 +105,9 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -138,6 +144,14 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List if !result.IsTruncated { return } + + // Add this to catch broken S3 API implementations. + if continuationToken == "" { + sendObjectInfo(ObjectInfo{ + Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), + }) + return + } } }(objectStatCh) return objectStatCh @@ -263,20 +277,28 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // If recursive we do not delimit. delimiter = "" } + + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -289,9 +311,9 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // Get list of objects a maximum of 1000 per request. result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -344,21 +366,28 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts delimiter = "" } + sendObjectInfo := func(info ObjectInfo) { + select { + case resultCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } @@ -375,9 +404,9 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts // Get list of objects a maximum of 1000 per request. result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -659,11 +688,10 @@ func (o *ListObjectsOptions) Set(key, value string) { // ListObjects returns objects list after evaluating the passed options. // -// api := client.New(....) -// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { -// fmt.Println(object) -// } -// +// api := client.New(....) +// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// fmt.Println(object) +// } func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { if opts.WithVersions { return c.listObjectVersions(ctx, bucketName, opts) @@ -694,12 +722,12 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb // If you enable recursive as 'true' this function will return back all // the multipart objects in a given bucket name. // -// api := client.New(....) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } +// api := client.New(....) 
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
+// fmt.Println(message)
+// }
 func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
 return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
 }
@@ -869,6 +897,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM
 }

 // listObjectParts list all object parts recursively.
+
+//lint:ignore U1000 Keep this around
 func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
 // Part number marker for the next batch of requests.
 var nextPartNumberMarker int
@@ -916,7 +946,7 @@ func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName strin
 }

 // listObjectPartsQuery (List Parts query)
-// - lists some or all (up to 1000) parts that have been uploaded
+// - lists some or all (up to 1000) parts that have been uploaded
 // for a specific multipart upload
 //
 // You can use the request parameters as selection criteria to return
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
index 149a536e4..7fad7600c 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go
@@ -65,10 +65,9 @@ func isReadAt(reader io.Reader) (ok bool) {
 // NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
 // object storage it will have the following parameters as constants.
 //
-// maxPartsCount - 10000
-// minPartSize - 16MiB
-// maxMultipartPutObjectSize - 5TiB
-//
+// maxPartsCount - 10000
+// minPartSize - 16MiB
+// maxMultipartPutObjectSize - 5TiB
 func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
 // object size is '-1' set it to 5TiB.
 var unknownSize bool
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 5fec17a1c..18bdeb24c 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -26,7 +26,6 @@ import (
 "fmt"
 "hash/crc32"
 "io"
- "io/ioutil"
 "net/http"
 "net/url"
 "sort"
@@ -159,8 +158,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 crcBytes = append(crcBytes, cSum...)
 }

+ p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
 // Proceed to upload the part.
- objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader)
+ objPart, uerr := c.uploadPart(ctx, p)
 if uerr != nil {
 return UploadInfo{}, uerr
 }
@@ -200,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) - opts = PutObjectOptions{} + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() @@ -269,57 +271,73 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object return initiateMultipartUploadResult, nil } +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + // uploadPart - Uploads a part in a multipart upload. -func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName string, uploadID string, reader io.Reader, partNumber int, md5Base64 string, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool, customHeader http.Header) (ObjectPart, error) { +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } - if err := s3utils.CheckValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } - if size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } - if size <= -1 { - return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } - if partNumber <= 0 { + if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } - if uploadID == "" { + if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. - urlValues.Set("uploadId", uploadID) + urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. - if customHeader == nil { - customHeader = make(http.Header) + if p.customHeader == nil { + p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. 
- if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, + bucketName: p.bucketName, + objectName: p.objectName, queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - streamSha256: streamSha256, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, } // Execute PUT on each part. @@ -330,7 +348,7 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. @@ -341,8 +359,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName string, objectName s ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } - objPart.Size = size - objPart.PartNumber = partNumber + objPart.Size = p.size + objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil @@ -395,7 +413,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object // Read resp.Body into a []bytes to parse for Error response inside the body var b []byte - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) if err != nil { return UploadInfo{}, err } @@ -431,5 +449,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object Location: completeMultipartUploadResult.Location, Expiration: expTime, ExpirationRuleID: ruleID, + + ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, + ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, + ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, + ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 464bde7f3..55b3f38e6 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -28,6 +28,7 @@ import ( "net/url" "sort" "strings" + "sync" "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/s3utils" @@ -44,7 +45,9 @@ import ( func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { - if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { + if opts.ConcurrentStreamParts && opts.NumThreads > 1 { + info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) + } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. 
info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) } else { @@ -107,11 +110,19 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN return UploadInfo{}, err } + withChecksum := c.trailingHeaderSupport + if withChecksum { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload in progress, if the // function returns any error, since we do not resume @@ -177,14 +188,33 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // As a special case if partNumber is lastPartNumber, we // calculate the offset based on the last part size. if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) + readOffset = size - lastPartSize partSize = lastPartSize } sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + var trailer = make(http.Header, 1) + if withChecksum { + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + } // Proceed to upload the part. - objPart, err := c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, "", "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, nil) + p := uploadPartParams{bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: sectionReader, + partNumber: uploadReq.PartNum, + size: partSize, + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + sha256Hex: "", + trailer: trailer, + } + objPart, err := c.uploadPart(ctx, p) if err != nil { uploadedPartsCh <- uploadedPartRes{ Error: err, @@ -221,8 +251,12 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, }) } } @@ -235,7 +269,22 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } + if withChecksum { + // Add hash of hashes. 
+ crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + for _, part := range complMultipartUpload.Parts { + cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) + if err == nil { + crc.Write(cs) + } + } + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } @@ -339,8 +388,8 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Update progress reader appropriately to the latest offset // as we read from the source. hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, hooked, partNumber, md5Base64, "", partSize, opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -382,6 +431,211 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } + if len(crcBytes) > 0 { + // Add hash of hashes. + crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel. +// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer. +func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string, + reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + if !opts.SendContentMd5 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } + + // Cancel all when an error occurs. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Initiates a new multipart request + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") + + // Aborts the multipart upload if the function returns + // any error, since we do not resume we should purge + // the parts which have been uploaded to relinquish + // storage space. 
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ // Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ nBuffers := int64(opts.NumThreads)
+ bufs := make(chan []byte, nBuffers)
+ all := make([]byte, nBuffers*partSize)
+ for i := int64(0); i < nBuffers; i++ {
+ bufs <- all[i*partSize : i*partSize+partSize]
+ }
+
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+ errCh := make(chan error, opts.NumThreads)
+
+ reader = newHook(reader, opts.Progress)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Proceed to upload the part.
+ var buf []byte
+ select {
+ case buf = <-bufs:
+ case err = <-errCh:
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, err
+ }
+
+ if int64(len(buf)) != partSize {
+ return UploadInfo{}, fmt.Errorf("read buffer length %d does not match expected partSize %d", len(buf), partSize)
+ }
+
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ // Done
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, rerr
+ }
+
+ // Calculate md5sum.
+ customHeader := make(http.Header)
+ if !opts.SendContentMd5 {
+ // Add CRC32C instead.
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ wg.Add(1)
+ go func(partNumber int) {
+ // Avoid declaring variables in the for loop
+ var md5Base64 string
+
+ if opts.SendContentMd5 {
+ md5Hash := c.md5Hasher()
+ md5Hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+ md5Hash.Close()
+ }
+
+ defer wg.Done()
+ p := uploadPartParams{
+ bucketName: bucketName,
+ objectName: objectName,
+ uploadID: uploadID,
+ reader: bytes.NewReader(buf[:length]),
+ partNumber: partNumber,
+ md5Base64: md5Base64,
+ size: int64(length),
+ sse: opts.ServerSideEncryption,
+ streamSha256: !opts.DisableContentSha256,
+ customHeader: customHeader,
+ }
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ errCh <- uerr
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ mu.Lock()
+ partsInfo[partNumber] = objPart
+ mu.Unlock()
+
+ // Send buffer back so it can be reused.
+ bufs <- buf
+ }(partNumber)
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+ }
+ wg.Wait()
+
+ // Collect any error
+ select {
+ case err = <-errCh:
+ return UploadInfo{}, err
+ default:
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{} if len(crcBytes) > 0 { // Add hash of hashes. @@ -419,6 +673,7 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } + var readSeeker io.Seeker if size > 0 { if isReadAt(reader) && !isObject(reader) { seeker, ok := reader.(io.Seeker) @@ -428,35 +683,49 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument(err.Error()) } reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + readSeeker = reader.(io.Seeker) } } } var md5Base64 string if opts.SendContentMd5 { - // Create a buffer. - buf := make([]byte, size) + // Calculate md5sum. + hash := c.md5Hasher() + + if readSeeker != nil { + if _, err := io.Copy(hash, reader); err != nil { + return UploadInfo{}, err + } + // Seek back to beginning of io.NewSectionReader's offset. + _, err = readSeeker.Seek(0, io.SeekStart) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } else { + // Create a buffer. + buf := make([]byte, size) + + length, err := readFull(reader, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, err + } - length, rErr := readFull(reader, buf) - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr + hash.Write(buf[:length]) + reader = bytes.NewReader(buf[:length]) } - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - reader = bytes.NewReader(buf[:length]) hash.Close() } // Update progress reader appropriately to the latest offset as we // read from the source. - readSeeker := newHook(reader, opts.Progress) + progressReader := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. - return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts) + return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) } // putObjectDo - executes the put object http operation. diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 321ad00aa..b29df17d4 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -87,7 +87,34 @@ type PutObjectOptions struct { SendContentMd5 bool DisableContentSha256 bool DisableMultipart bool - Internal AdvancedPutOptions + + // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, + // fill them serially and upload them in parallel. + // This can be used for faster uploads on non-seekable or slow-to-seek input. 
+ ConcurrentStreamParts bool
+ Internal AdvancedPutOptions
+
+ customHeaders http.Header
+}
+
+// SetMatchETag makes the PUT conditional: the write succeeds only if the
+// object's current etag matches, otherwise MinIO returns an error. This is
+// a MinIO specific extension to support optimistic locking semantics.
+func (opts *PutObjectOptions) SetMatchETag(etag string) {
+ if opts.customHeaders == nil {
+ opts.customHeaders = http.Header{}
+ }
+ opts.customHeaders.Set("If-Match", "\""+etag+"\"")
+}
+
+// SetMatchETagExcept makes the PUT conditional: the write succeeds only if
+// the object's current etag does not match; if it does, MinIO returns an
+// error. This is a MinIO specific extension for optimistic locking semantics.
+func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
+ if opts.customHeaders == nil {
+ opts.customHeaders = http.Header{}
+ }
+ opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
 }

 // getNumThreads - gets the number of threads to be used in the multipart
@@ -159,7 +186,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
 }
 if opts.Internal.ReplicationRequest {
- header.Set(minIOBucketReplicationRequest, "")
+ header.Set(minIOBucketReplicationRequest, "true")
 }
 if !opts.Internal.LegalholdTimestamp.IsZero() {
 header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
@@ -182,6 +209,12 @@ func (opts PutObjectOptions) Header() (header http.Header) {
 header.Set("x-amz-meta-"+k, v)
 }
 }
+
+ // set any other additional custom headers.
+ for k, v := range opts.customHeaders {
+ header[k] = v
+ }
+
 return
 }
@@ -269,6 +302,12 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 }

 if size < 0 {
+ if opts.DisableMultipart {
+ return UploadInfo{}, errors.New("no length provided and multipart disabled")
+ }
+ if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+ return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+ }
 return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
 }
@@ -366,7 +405,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
 rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)

 // Proceed to upload the part.
- objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, md5Base64, "", int64(length), opts.ServerSideEncryption, !opts.DisableContentSha256, customHeader)
+ p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
+ objPart, uerr := c.uploadPart(ctx, p)
 if uerr != nil {
 return UploadInfo{}, uerr
 }
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
index b7502e2d9..849471e33 100644
--- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -24,7 +24,6 @@ import (
 "context"
 "fmt"
 "io"
- "io/ioutil"
 "os"
 "strings"
 "sync"
@@ -60,6 +59,7 @@ type SnowballObject struct {
 Size int64

 // Modtime to apply to the object.
+ // If Modtime is the zero value, the current time will be used.
 ModTime time.Time

 // Content of the object.
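The PutObjectOptions hunk above adds If-Match/If-None-Match helpers. A minimal sketch of how they might be used for optimistic locking; the helper name, bucket, and object below are hypothetical, and standard If-Match semantics are assumed:

package example

import (
	"bytes"
	"context"

	"github.com/minio/minio-go/v7"
)

// updateIfUnchanged re-uploads an object only if its ETag still matches the
// value we last observed; otherwise the server rejects the PUT, so a
// concurrent writer is never silently overwritten.
func updateIfUnchanged(ctx context.Context, clnt *minio.Client, payload []byte, lastSeenETag string) error {
	opts := minio.PutObjectOptions{ContentType: "application/octet-stream"}
	// Sets the If-Match header added by this patch (hypothetical usage).
	opts.SetMatchETag(lastSeenETag)
	_, err := clnt.PutObject(ctx, "mybucket", "state.bin",
		bytes.NewReader(payload), int64(len(payload)), opts)
	return err
}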
@@ -107,7 +107,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
 return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
 }
 } else {
- f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
+ f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
 if err != nil {
 return err
 }
@@ -173,6 +173,10 @@ objectLoop:
 ModTime: obj.ModTime,
 Format: tar.FormatPAX,
 }
+ if header.ModTime.IsZero() {
+ header.ModTime = time.Now().UTC()
+ }
+
 if err := t.WriteHeader(&header); err != nil {
 closeObj()
 return err
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
index 0fee90226..c0ff31a5b 100644
--- a/vendor/github.com/minio/minio-go/v7/api-remove.go
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -82,8 +82,8 @@ func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string,
 // RemoveBucket deletes the bucket name.
 //
-// All objects (including all object versions and delete markers).
-// in the bucket must be deleted before successfully attempting this request.
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before successfully attempting this request.
 func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
 // Input validation.
 if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -166,7 +166,7 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string
 headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
 }
 if opts.Internal.ReplicationRequest {
- headers.Set(minIOBucketReplicationRequest, "")
+ headers.Set(minIOBucketReplicationRequest, "true")
 }
 if opts.ForceDelete {
 headers.Set(minIOForceDelete, "true")
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index b1b848f4a..827395ee1 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -316,17 +316,15 @@ type completeMultipartUploadResult struct {
 // CompletePart sub container lists individual part numbers and their
 // md5sum, part of completeMultipartUpload.
 type CompletePart struct {
- XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
 // Part number identifies the part.
 PartNumber int
 ETag string

 // Checksum values
- ChecksumCRC32 string
- ChecksumCRC32C string
- ChecksumSHA1 string
- ChecksumSHA256 string
+ ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
+ ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
+ ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
+ ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
 }

 // completeMultipartUpload container for completing multipart upload.
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
index 5d47d7ec5..628d967ff 100644
--- a/vendor/github.com/minio/minio-go/v7/api-select.go
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -41,8 +41,8 @@ type CSVFileHeaderInfo string

 // Constants for file header info.
const ( CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" - CSVFileHeaderInfoIgnore = "IGNORE" - CSVFileHeaderInfoUse = "USE" + CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE" + CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE" ) // SelectCompressionType - is the parameter for what type of compression is @@ -52,15 +52,15 @@ type SelectCompressionType string // Constants for compression types under select API. const ( SelectCompressionNONE SelectCompressionType = "NONE" - SelectCompressionGZIP = "GZIP" - SelectCompressionBZIP = "BZIP2" + SelectCompressionGZIP SelectCompressionType = "GZIP" + SelectCompressionBZIP SelectCompressionType = "BZIP2" // Non-standard compression schemes, supported by MinIO hosts: - SelectCompressionZSTD = "ZSTD" // Zstandard compression. - SelectCompressionLZ4 = "LZ4" // LZ4 Stream - SelectCompressionS2 = "S2" // S2 Stream - SelectCompressionSNAPPY = "SNAPPY" // Snappy stream + SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression. + SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream + SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream + SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream ) // CSVQuoteFields - is the parameter for how CSV fields are quoted. @@ -69,7 +69,7 @@ type CSVQuoteFields string // Constants for csv quote styles. const ( CSVQuoteFieldsAlways CSVQuoteFields = "Always" - CSVQuoteFieldsAsNeeded = "AsNeeded" + CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded" ) // QueryExpressionType - is of what syntax the expression is, this should only @@ -87,7 +87,7 @@ type JSONType string // Constants for JSONTypes. const ( JSONDocumentType JSONType = "DOCUMENT" - JSONLinesType = "LINES" + JSONLinesType JSONType = "LINES" ) // ParquetInputOptions parquet input specific options @@ -378,8 +378,8 @@ type SelectObjectType string // Constants for input data types. const ( SelectObjectTypeCSV SelectObjectType = "CSV" - SelectObjectTypeJSON = "JSON" - SelectObjectTypeParquet = "Parquet" + SelectObjectTypeJSON SelectObjectType = "JSON" + SelectObjectTypeParquet SelectObjectType = "Parquet" ) // preludeInfo is used for keeping track of necessary information from the @@ -416,7 +416,7 @@ type messageType string const ( errorMsg messageType = "error" - commonMsg = "event" + commonMsg messageType = "event" ) // eventType represents the type of event. @@ -425,9 +425,9 @@ type eventType string // list of event-types returned by Select API. const ( endEvent eventType = "End" - recordsEvent = "Records" - progressEvent = "Progress" - statsEvent = "Stats" + recordsEvent eventType = "Records" + progressEvent eventType = "Progress" + statsEvent eventType = "Stats" ) // contentType represents content type of event. 
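The api-select.go hunks above all apply the same correction: inside each const block, only the first declaration carried an explicit type, so the remaining constants were untyped string constants rather than values of the named type. A standalone sketch (with hypothetical names) of the difference the fix makes:

package main

import "fmt"

type compressionType string

const (
	compressionNone compressionType = "NONE"
	// Declared without an explicit type, as these constants were before
	// the fix, the value below is an untyped string constant and defaults
	// to plain string, not compressionType.
	compressionGZIP = "GZIP"
)

func main() {
	fmt.Printf("%T\n", compressionNone) // main.compressionType
	fmt.Printf("%T\n", compressionGZIP) // string
}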
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go
index 6deb5f5dc..418d6cb25 100644
--- a/vendor/github.com/minio/minio-go/v7/api-stat.go
+++ b/vendor/github.com/minio/minio-go/v7/api-stat.go
@@ -70,6 +70,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
 if opts.Internal.ReplicationDeleteMarker {
 headers.Set(minIOBucketReplicationDeleteMarker, "true")
 }
+ if opts.Internal.IsReplicationReadyForDeleteMarker {
+ headers.Set(isMinioTgtReplicationReady, "true")
+ }

 urlValues := make(url.Values)
 if opts.VersionID != "" {
@@ -90,6 +93,7 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
 if resp != nil {
 deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"
+ replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true"
 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
 if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker {
 errResp := ErrorResponse{
@@ -105,8 +109,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
 }, errResp
 }
 return ObjectInfo{
- VersionID: resp.Header.Get(amzVersionID),
- IsDeleteMarker: deleteMarker,
+ VersionID: resp.Header.Get(amzVersionID),
+ IsDeleteMarker: deleteMarker,
+ ReplicationReady: replicationReady, // whether delete marker can be replicated
 }, httpRespToErrorResponse(resp, bucketName, objectName)
 }
 }
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 6598e9d92..7ec7a620b 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -20,10 +20,11 @@ package minio
 import (
 "bytes"
 "context"
+ "encoding/base64"
 "errors"
 "fmt"
+ "hash/crc32"
 "io"
- "io/ioutil"
 "math/rand"
 "net"
 "net/http"
@@ -93,6 +94,8 @@ type Client struct {
 sha256Hasher func() md5simd.Hasher

 healthStatus int32
+
+ trailingHeaderSupport bool
 }

 // Options for New method
@@ -103,6 +106,10 @@ type Options struct {
 Region string
 BucketLookup BucketLookupType

+ // TrailingHeaders indicates server support of trailing headers.
+ // Only supported for v4 signatures.
+ TrailingHeaders bool
+
 // Custom hash routines. Leave nil to use standard.
 CustomMD5 func() md5simd.Hasher
 CustomSHA256 func() md5simd.Hasher
@@ -111,13 +118,13 @@ type Options struct {
 // Global constants.
 const (
 libraryName = "minio-go"
- libraryVersion = "v7.0.37"
+ libraryVersion = "v7.0.49"
 )

 // User Agent should always follow the below style.
 // Please open an issue to discuss any new changes here.
 //
-// MinIO (OS; ARCH) LIB/VER APP/VER
+// MinIO (OS; ARCH) LIB/VER APP/VER
 const (
 libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
 libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
@@ -246,6 +253,9 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
 if clnt.sha256Hasher == nil {
 clnt.sha256Hasher = newSHA256Hasher
 }
+
+ clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()
+
 // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
 // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
clnt.lookup = opts.BucketLookup @@ -312,9 +322,9 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { // Hash materials provides relevant initialized hash algo writers // based on the expected signature type. // -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { hashSums = make(map[string][]byte) hashAlgos = make(map[string]md5simd.Hasher) @@ -419,6 +429,8 @@ type requestMetadata struct { contentMD5Base64 string // carries base64 encoded md5sum contentSHA256Hex string // carries hex encoded sha256sum streamSha256 bool + addCrc bool + trailer http.Header // (http.Request).Trailer. Requires v4 signature. } // dumpHTTP - dump HTTP request and response. @@ -581,6 +593,17 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } } + if metadata.addCrc { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + } // Instantiate a new request. var req *http.Request req, err = c.newRequest(ctx, method, metadata) @@ -592,6 +615,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } + // Initiate the request. res, err = c.do(req) if err != nil { @@ -610,7 +634,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } // Read the body to be saved later. - errBodyBytes, err := ioutil.ReadAll(res.Body) + errBodyBytes, err := io.ReadAll(res.Body) // res.Body should be closed closeResponse(res) if err != nil { @@ -619,20 +643,20 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // Save the body. errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = ioutil.NopCloser(errBodySeeker) + res.Body = io.NopCloser(errBodySeeker) // For errors verify if its retryable otherwise fail quickly. errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) // Save the body back again. errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = ioutil.NopCloser(errBodySeeker) + res.Body = io.NopCloser(errBodySeeker) // Bucket region if set in error response and the error // code dictates invalid region, we can retry the request // with the new region. // - // Additionally we should only retry if bucketLocation and custom + // Additionally, we should only retry if bucketLocation and custom // region is empty. if c.region == "" { switch errResponse.Code { @@ -789,7 +813,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request if metadata.contentLength == 0 { req.Body = nil } else { - req.Body = ioutil.NopCloser(metadata.contentBody) + req.Body = io.NopCloser(metadata.contentBody) } // Set incoming content-length. 
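The addCrc path in executeMethod above wraps the request body so a CRC32C checksum accumulates while the body streams and is emitted as the x-amz-checksum-crc32c trailer once the read completes. A minimal, self-contained sketch of the encoding involved (the payload is hypothetical):

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	// Same construction the client uses throughout this patch: CRC32 with
	// the Castagnoli polynomial, base64-encoded for the header/trailer value.
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	crc.Write([]byte("example payload"))
	fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}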
@@ -814,21 +838,30 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request // Add signature version '2' authorization header. req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.streamSha256 && !c.secure: - // Streaming signature is used by default for a PUT object request. Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. req = signer.StreamingSignV4(req, accessKeyID, - secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) default: // Set sha256 sum for signature calculation only with signature version '4'. shaHeader := unsignedPayload if metadata.contentSHA256Hex != "" { shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer } req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) } // Return request. diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index dee83b870..1c3e8e6f6 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -46,6 +46,10 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + // Total number of parallel workers used for multipart operation. const totalWorkers = 4 @@ -96,6 +100,9 @@ const ( minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" // Header indicates last legalhold update time on source minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" - - minIOForceDelete = "x-minio-force-delete" + minIOForceDelete = "x-minio-force-delete" + // Header indicates delete marker replication request can be sent by source now. + minioTgtReplicationReady = "X-Minio-Replication-Ready" + // Header asks if delete marker replication request can be sent by source now. + isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" ) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index f6132e79a..207a38702 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -88,8 +88,19 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke // PutObjectPart - Upload an object part. 
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
- streamSha256 := true
- return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256, nil)
+ p := uploadPartParams{
+ bucketName: bucket,
+ objectName: object,
+ uploadID: uploadID,
+ reader: data,
+ partNumber: partID,
+ md5Base64: md5Base64,
+ sha256Hex: sha256Hex,
+ size: size,
+ sse: sse,
+ streamSha256: true,
+ }
+ return c.uploadPart(ctx, p)
 }

 // ListObjectParts - List uploaded parts of an incomplete upload.
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 483b5cb11..23dd7e9b9 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -31,7 +31,6 @@ import (
 "hash"
 "hash/crc32"
 "io"
- "io/ioutil"
 "math/rand"
 "mime/multipart"
 "net/http"
@@ -346,7 +345,7 @@ func getDataReader(fileName string) io.ReadCloser {
 if _, ok := dataFileCRC32[fileName]; !ok {
 dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
 }
- return ioutil.NopCloser(newRandomReader(size, size))
+ return io.NopCloser(newRandomReader(size, size))
 }
 reader, _ := os.Open(getMintDataDirFilePath(fileName))
 if _, ok := dataFileCRC32[fileName]; !ok {
@@ -989,7 +988,7 @@ func testGetObjectWithVersioning() {
 for _, testFile := range testFiles {
 r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1131,7 +1130,7 @@ func testPutObjectWithVersioning() {
 var errs [n]error
 for i := 0; i < n; i++ {
 r := newRandomReader(int64((1<<20)*i+i), int64(i))
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1271,7 +1270,7 @@ func testCopyObjectWithVersioning() {
 testFiles := []string{"datafile-1-b", "datafile-10-kB"}
 for _, testFile := range testFiles {
 r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1304,7 +1303,7 @@ func testCopyObjectWithVersioning() {
 return
 }

- oldestContent, err := ioutil.ReadAll(reader)
+ oldestContent, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
 return
@@ -1338,7 +1337,7 @@ func testCopyObjectWithVersioning() {
 }
 defer readerCopy.Close()

- newestContent, err := ioutil.ReadAll(readerCopy)
+ newestContent, err := io.ReadAll(readerCopy)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
 return
@@ -1408,7 +1407,7 @@ func testConcurrentCopyObjectWithVersioning() {
 testFiles := []string{"datafile-10-kB"}
 for _, testFile := range testFiles {
 r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
 if err != nil {
 logError(testName, function, args, startTime, "", "unexpected failure", err)
 return
@@ -1441,7 +1440,7 @@ func testConcurrentCopyObjectWithVersioning() {
 return
 }

- oldestContent, err := ioutil.ReadAll(reader)
+ oldestContent, err := io.ReadAll(reader)
 if err != nil {
 logError(testName, function, args, startTime, "", "Reading
the oldest object version failed", err) return @@ -1491,7 +1490,7 @@ func testConcurrentCopyObjectWithVersioning() { } defer readerCopy.Close() - newestContent, err := ioutil.ReadAll(readerCopy) + newestContent, err := io.ReadAll(readerCopy) if err != nil { logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) return @@ -1571,7 +1570,7 @@ func testComposeObjectWithVersioning() { for _, testFile := range testFiles { r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "unexpected failure", err) return @@ -1633,7 +1632,7 @@ func testComposeObjectWithVersioning() { } defer readerCopy.Close() - copyContentBytes, err := ioutil.ReadAll(readerCopy) + copyContentBytes, err := io.ReadAll(readerCopy) if err != nil { logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) return @@ -1733,12 +1732,39 @@ func testRemoveObjectWithVersioning() { logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) return } - - err = c.RemoveBucket(context.Background(), bucketName) + // test delete marker version id is non-null + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) if err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // create delete marker + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) return } + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + idx := 0 + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if idx == 0 { + if !info.IsDeleteMarker { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err) + return + } + } + idx++ + } + + defer cleanupBucket(bucketName, c) successLogger(testName, function, args, startTime).Info() } @@ -2054,10 +2080,10 @@ func testPutObjectWithChecksums() { ChecksumSHA1 string ChecksumSHA256 string }{ - {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE(), ChecksumCRC32: "yXTVFQ=="}, - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "zXqj7Q=="}, - {header: "x-amz-checksum-sha1", hasher: sha1.New(), ChecksumSHA1: "SwmAs3F75Sw/sE4dHehkvYtn9UE="}, - {header: "x-amz-checksum-sha256", hasher: sha256.New(), ChecksumSHA256: "8Tlu9msuw/cpmWNEnQx97axliBjiE6gK1doiY0N9WuA="}, + {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, + {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, + {header: "x-amz-checksum-sha1", hasher: sha1.New()}, + {header: "x-amz-checksum-sha256", hasher: sha256.New()}, } for i, test := range tests { @@ -2113,10 +2139,10 @@ func testPutObjectWithChecksums() { logError(testName, function, args, startTime, "", 
"PutObject failed", err) return } - cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2132,10 +2158,10 @@ func testPutObjectWithChecksums() { return } - cmpChecksum(st.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(st.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(st.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(st.ChecksumCRC32C, test.ChecksumCRC32C) + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) if st.Size != int64(bufSize) { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) @@ -2461,7 +2487,7 @@ func testGetObjectSeekEnd() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -2982,7 +3008,7 @@ func testFPutObjectMultipart() { fileName := getMintDataDirFilePath("datafile-129-MB") if fileName == "" { // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -3091,7 +3117,7 @@ func testFPutObject() { fName := getMintDataDirFilePath("datafile-129-MB") if fName == "" { // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -3257,7 +3283,7 @@ func testFPutObjectContext() { fName := getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -3357,7 +3383,7 @@ func testFPutObjectContextV2() { fName := getMintDataDirFilePath("datafile-1-MB") if fName == "" { // Make a temp file with 1 MiB bytes of data. 
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") if err != nil { logError(testName, function, args, startTime, "", "Temp file creation failed", err) return @@ -3621,7 +3647,7 @@ func testGetObjectS3Zip() { logError(testName, function, args, startTime, "", "file.Open failed", err) return } - want, err := ioutil.ReadAll(zfr) + want, err := io.ReadAll(zfr) if err != nil { logError(testName, function, args, startTime, "", "fzip file read failed", err) return @@ -3638,7 +3664,7 @@ func testGetObjectS3Zip() { } return } - got, err := ioutil.ReadAll(r) + got, err := io.ReadAll(r) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -3722,7 +3748,7 @@ func testGetObjectReadSeekFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -3885,7 +3911,7 @@ func testGetObjectReadAtFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4062,7 +4088,7 @@ func testGetObjectReadAtWhenEOFWasReached() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4181,7 +4207,7 @@ func testPresignedPostPolicy() { metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4204,10 +4230,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) return } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - } if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) return @@ -4216,10 +4238,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) return } - if err := policy.SetContentTypeStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err) - return - } if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) return @@ -4253,7 +4271,7 @@ func testPresignedPostPolicy() { filePath := getMintDataDirFilePath("datafile-33-kB") if filePath == "" { // Make a temp file with 33 KB data. 
- file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -4596,7 +4614,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4778,7 +4796,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -4952,7 +4970,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -5135,7 +5153,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -6146,7 +6164,7 @@ func testFunctional() { return } - newReadBytes, err := ioutil.ReadAll(newReader) + newReadBytes, err := io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -6277,7 +6295,7 @@ func testFunctional() { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) return } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) + newPresignedBytes, err := io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return @@ -6320,7 +6338,7 @@ func testFunctional() { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) return } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) + newPresignedBytes, err = io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) return @@ -6380,7 +6398,7 @@ func testFunctional() { return } - newReadBytes, err = ioutil.ReadAll(newReader) + newReadBytes, err = io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) return @@ -6436,7 +6454,7 @@ func testFunctional() { return } - newReadBytes, err = ioutil.ReadAll(newReader) + newReadBytes, err = io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) return @@ -6660,7 +6678,7 @@ func testPutObjectUploadSeekedObject() { } args["fileToUpload"] = fileName } else { - tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") + tempfile, err = os.CreateTemp("", "minio-go-upload-test-") if err != nil { logError(testName, function, args, startTime, "", "TempFile create failed", 
err) return @@ -6924,7 +6942,7 @@ func testFPutObjectV2() { defer cleanupBucket(bucketName, c) // Make a temp file with 11*1024*1024 bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") if err != nil { logError(testName, function, args, startTime, "", "TempFile creation failed", err) return @@ -7153,7 +7171,7 @@ func testGetObjectReadSeekFunctionalV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7307,7 +7325,7 @@ func testGetObjectReadAtFunctionalV2() { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7845,7 +7863,7 @@ func testEncryptedEmptyObject() { } defer reader.Close() - decBytes, err := ioutil.ReadAll(reader) + decBytes, err := io.ReadAll(reader) if err != nil { logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) return @@ -7923,7 +7941,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, return } - decBytes, err := ioutil.ReadAll(reader) + decBytes, err := io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -7963,7 +7981,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, return } - decBytes, err = ioutil.ReadAll(reader) + decBytes, err = io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -8002,7 +8020,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, } defer reader.Close() - decBytes, err = ioutil.ReadAll(reader) + decBytes, err = io.ReadAll(reader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11048,7 +11066,7 @@ func testFunctionalV2() { return } - newReadBytes, err := ioutil.ReadAll(newReader) + newReadBytes, err := io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11154,7 +11172,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) return } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) + newPresignedBytes, err := io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11193,7 +11211,7 @@ func testFunctionalV2() { logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) return } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) + newPresignedBytes, err = io.ReadAll(resp.Body) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed", err) return @@ -11247,7 +11265,7 @@ func testFunctionalV2() { return } - newReadBytes, err = ioutil.ReadAll(newReader) + newReadBytes, err = io.ReadAll(newReader) if err != nil { logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) return @@ -11561,7 +11579,7 @@ func testGetObjectRanges() { } for _, test := 
range tests { wantRC := getDataReader("datafile-129-MB") - io.CopyN(ioutil.Discard, wantRC, test.start) + io.CopyN(io.Discard, wantRC, test.start) want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) opts := minio.GetObjectOptions{} opts.SetRange(test.start, test.end) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index 12ed08427..1c73d1008 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -19,11 +19,11 @@ package credentials import ( "bytes" + "crypto/sha256" "encoding/hex" "encoding/xml" "errors" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -31,7 +31,6 @@ import ( "time" "github.com/minio/minio-go/v7/pkg/signer" - sha256 "github.com/minio/sha256-simd" ) // AssumeRoleResponse contains the result of successful AssumeRole request. @@ -139,7 +138,7 @@ func closeResponse(resp *http.Response) { // Without this closing connection would disallow re-using // the same connection for future uses. // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } } @@ -191,7 +190,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume defer closeResponse(resp) if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return AssumeRoleResponse{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go index 6dc8e9d05..ddccfb173 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -31,18 +31,17 @@ package credentials // will cache that Provider for all calls to IsExpired(), until Retrieve is // called again after IsExpired() is true. // -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. -// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") -// if err != nil { -// log.Fatalln(err) -// } +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) // +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } type Chain struct { Providers []Provider curr Provider diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index 6b93a27fb..af6104967 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -65,10 +65,11 @@ type Provider interface { // provider's struct. // // Example: -// type IAMCredentialProvider struct { -// Expiry -// ... -// } +// +// type IAMCredentialProvider struct { +// Expiry +// ... 
+// } type Expiry struct { // The date/time when to expire on expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 000000000..afbfad559 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json @@ -0,0 +1,7 @@ +{ + "Version": 1, + "SessionToken": "token", + "AccessKeyId": "accessKey", + "SecretAccessKey": "secret", + "Expiration": "9999-04-27T16:02:25.000Z" +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample index 7fc91d9d2..e2dc1bfec 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -10,3 +10,6 @@ aws_secret_access_key = secret [with_colon] aws_access_key_id: accessKey aws_secret_access_key: secret + +[with_process] +credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go index 0c94477b7..fbfb10549 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -28,35 +28,33 @@ // // Example of using the environment variable credentials. // -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } // // Example of forcing credentials to expire and be refreshed on the next Get(). // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. // -// -// Custom Provider +// # Custom Provider // // Each Provider built into this package also provides a helper method to generate // a Credentials pointer setup with the provider. To use a custom Provider just // create a type which satisfies the Provider interface and pass it to the // NewCredentials method. // -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} // +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() package credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go index f4b027a41..07a9c2f09 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go @@ -22,7 +22,6 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" ) // ErrorResponse - Is the typed error returned. 
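Editor's note: the comment-only hunks in this package (and the similar ones below in `signer`, `notification`, and `post-policy.go`) are consistent with running `gofmt` from Go 1.19, which rewrites doc comments to the new syntax: example code must be indented one tab past the `//` marker to render as a code block, and a `# Heading` line introduces a section, as in the `# Custom Provider` hunk above. A small illustration of the convention, with hypothetical identifiers:

```go
// Package demo illustrates Go 1.19 doc-comment formatting.
//
// Pre-1.19 comments often left examples flush with the prose:
//
// creds := NewFromEnv()   // rendered as plain text by newer tools
//
// gofmt now indents code blocks with a tab and supports headings:
//
// # Custom Provider
//
//	creds := NewCredentials(&MyProvider{})
//	credValue, err := creds.Get()
package demo
```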
@@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error { func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { // read the whole body (up to 1MB) const maxBodyLength = 1 << 20 - body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) if err != nil { return nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index cbdcfe256..da09707e3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -18,17 +18,33 @@ package credentials import ( + "encoding/json" + "errors" "os" + "os/exec" "path/filepath" + "strings" + "time" ini "gopkg.in/ini.v1" ) +// A externalProcessCredentials stores the output of a credential_process +type externalProcessCredentials struct { + Version int + SessionToken string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Expiration time.Time +} + // A FileAWSCredentials retrieves credentials from the current user's home // directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type FileAWSCredentials struct { + Expiry + // Path to the shared credentials file. // // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the @@ -89,6 +105,33 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { // Default to empty string if not found. token := iniProfile.Key("aws_session_token") + // If credential_process is defined, obtain credentials by executing + // the external process + credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) + if credentialProcess != "" { + args := strings.Fields(credentialProcess) + if len(args) <= 1 { + return Value{}, errors.New("invalid credential process args") + } + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.Output() + if err != nil { + return Value{}, err + } + var externalProcessCredentials externalProcessCredentials + err = json.Unmarshal([]byte(out), &externalProcessCredentials) + if err != nil { + return Value{}, err + } + p.retrieved = true + p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: externalProcessCredentials.AccessKeyID, + SecretAccessKey: externalProcessCredentials.SecretAccessKey, + SessionToken: externalProcessCredentials.SessionToken, + SignerType: SignatureV4, + }, nil + } p.retrieved = true return Value{ AccessKeyID: id.String(), @@ -98,11 +141,6 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { }, nil } -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - // loadProfiles loads from the file pointed to by shared credentials filename for profile. // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
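Editor's note: the new `credential_process` path in `FileAWSCredentials.Retrieve` executes an external command and parses its JSON output, mirroring the AWS CLI's shared-config feature; note the `AccessKeyId` JSON tag bridging the casing difference with the Go field. A standalone sketch of the same flow, assuming a `credentials.json` shaped like the sample file added in this patch sits in the working directory:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// Mirrors the externalProcessCredentials struct added above.
type processCreds struct {
	Version         int
	SessionToken    string
	AccessKeyID     string `json:"AccessKeyId"`
	SecretAccessKey string
	Expiration      time.Time
}

func main() {
	// Hypothetical profile value; the shipped credentials.sample uses
	// "credential_process = /bin/cat credentials.json".
	credentialProcess := "/bin/cat credentials.json"

	args := strings.Fields(credentialProcess)
	if len(args) <= 1 {
		panic("invalid credential process args") // the provider errors here too
	}
	out, err := exec.Command(args[0], args[1:]...).Output()
	if err != nil {
		panic(err)
	}

	var creds processCreds
	if err := json.Unmarshal(out, &creds); err != nil {
		panic(err)
	}
	fmt.Println(creds.AccessKeyID, "expires", creds.Expiration)
}
```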
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go index 56437edb2..e4c0f6717 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -18,7 +18,6 @@ package credentials import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -114,6 +113,7 @@ type hostConfig struct { type config struct { Version string `json:"version"` Hosts map[string]hostConfig `json:"hosts"` + Aliases map[string]hostConfig `json:"aliases"` } // loadAliass loads from the file pointed to by shared credentials filename for alias. @@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) { cfg := &config{} json := jsoniter.ConfigCompatibleWithStandardLibrary - configBytes, err := ioutil.ReadFile(filename) + configBytes, err := os.ReadFile(filename) if err != nil { return hostConfig{}, err } if err = json.Unmarshal(configBytes, cfg); err != nil { return hostConfig{}, err } + + if cfg.Version == "10" { + return cfg.Aliases[alias], nil + } + return cfg.Hosts[alias], nil } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index 14369cf10..e641639c9 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -22,7 +22,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -106,7 +106,7 @@ func (m *IAM) Retrieve() (Value, error) { Client: m.Client, STSEndpoint: endpoint, GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { - token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) + token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) if err != nil { return nil, err } @@ -268,7 +268,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { return "", err } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go index 34598bd8e..9e92c1e0f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -22,7 +22,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string, defer resp.Body.Close() if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return AssumeRoleWithClientGrantsResponse{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index 25b45ecb0..ec5f3f097 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -21,7 +21,7 @@ import ( "bytes" "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { defer resp.Body.Close() if 
resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return value, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index c7ac4db3b..dee0a8cbb 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -21,7 +21,6 @@ import ( "encoding/xml" "errors" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -141,6 +140,9 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { if err != nil { return Value{}, err } + if req.Form == nil { + req.Form = url.Values{} + } req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) resp, err := i.Client.Do(req) @@ -152,7 +154,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { } if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return Value{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 50f5f1ce6..2e2af50b4 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -22,7 +22,7 @@ import ( "encoding/xml" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession defer resp.Body.Close() if resp.StatusCode != http.StatusOK { var errResp ErrorResponse - buf, err := ioutil.ReadAll(resp.Body) + buf, err := io.ReadAll(resp.Body) if err != nil { return AssumeRoleWithWebIdentityResponse{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 000000000..6db26c036 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go @@ -0,0 +1,24 @@ +//go:build !fips +// +build !fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 000000000..640258242 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go @@ -0,0 +1,24 @@ +//go:build fips +// +build fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go index 06e68e736..163fa62b4 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -67,9 +67,9 @@ var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { // Type is the server-side-encryption method. It represents one of // the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption type Type string const ( diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go index d0a471638..126661a9e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -22,14 +22,14 @@ type identity struct { PrincipalID string `json:"principalId"` } -// event bucket metadata. +// event bucket metadata. type bucketMeta struct { Name string `json:"name"` OwnerIdentity identity `json:"ownerIdentity"` ARN string `json:"arn"` } -// event object metadata. +// event object metadata. type objectMeta struct { Key string `json:"key"` Size int64 `json:"size,omitempty"` @@ -40,7 +40,7 @@ type objectMeta struct { Sequencer string `json:"sequencer"` } -// event server specific metadata. +// event server specific metadata. 
type eventMeta struct { SchemaVersion string `json:"s3SchemaVersion"` ConfigurationID string `json:"configurationId"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index 75a1f6096..fd034fdc8 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -21,6 +21,7 @@ import ( "encoding/xml" "errors" "fmt" + "strings" "github.com/minio/minio-go/v7/pkg/set" ) @@ -29,22 +30,23 @@ import ( type EventType string // The role of all event types are described in : -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +// +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations const ( ObjectCreatedAll EventType = "s3:ObjectCreated:*" - ObjectCreatedPut = "s3:ObjectCreated:Put" - ObjectCreatedPost = "s3:ObjectCreated:Post" - ObjectCreatedCopy = "s3:ObjectCreated:Copy" - ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - ObjectAccessedGet = "s3:ObjectAccessed:Get" - ObjectAccessedHead = "s3:ObjectAccessed:Head" - ObjectAccessedAll = "s3:ObjectAccessed:*" - ObjectRemovedAll = "s3:ObjectRemoved:*" - ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - BucketCreatedAll = "s3:BucketCreated:*" - BucketRemovedAll = "s3:BucketRemoved:*" + ObjectCreatedPut EventType = "s3:ObjectCreated:Put" + ObjectCreatedPost EventType = "s3:ObjectCreated:Post" + ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy" + ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload" + ObjectAccessedGet EventType = "s3:ObjectAccessed:Get" + ObjectAccessedHead EventType = "s3:ObjectAccessed:Head" + ObjectAccessedAll EventType = "s3:ObjectAccessed:*" + ObjectRemovedAll EventType = "s3:ObjectRemoved:*" + ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" + ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" + ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" + BucketCreatedAll EventType = "s3:BucketCreated:*" + BucketRemovedAll EventType = "s3:BucketRemoved:*" ) // FilterRule - child of S3Key, a tag in the notification xml which @@ -87,6 +89,27 @@ func NewArn(partition, service, region, accountID, resource string) Arn { } } +var ( + // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' + ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") + // ErrInvalidArnFormat is returned when ARN string format is not valid + ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:::::'") +) + +// NewArnFromString parses string representation of ARN into Arn object. +// Returns an error if the string format is incorrect. 
+func NewArnFromString(arn string) (Arn, error) { + parts := strings.Split(arn, ":") + if len(parts) != 6 { + return Arn{}, ErrInvalidArnFormat + } + if parts[0] != "arn" { + return Arn{}, ErrInvalidArnPrefix + } + + return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil +} + // String returns the string format of the ARN func (arn Arn) String() string { return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 97abf8df8..645fe18cb 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -700,6 +700,10 @@ type TargetMetrics struct { PendingCount uint64 `json:"pendingReplicationCount"` // Total number of failed operations including metadata updates FailedCount uint64 `json:"failedReplicationCount"` + // Bandwidth limit in bytes/sec for this target + BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"` + // Current bandwidth used in bytes/sec for this target + CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"` } // Metrics represents inline replication metrics for a bucket. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 000000000..77540e2d8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,224 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// StreamingUSReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides chunked upload +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for streaming signature. 
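Editor's note: `StreamingUnsignedV4` wraps the request body so it streams in `aws-chunked` framing without per-chunk signatures: each chunk is its hex-encoded length, CRLF, the data, CRLF; a zero-length chunk terminates the payload, and any trailer headers follow as lower-cased `name:value` lines. A standalone sketch of the framing only (not the reader plumbing), matching `writeChunk`/`addTrailer` above; the checksum value is hypothetical:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// frameUnsignedChunks sketches the wire format StreamingUSReader emits.
func frameUnsignedChunks(data []byte, chunkSize int, trailers map[string]string) []byte {
	var buf bytes.Buffer
	for len(data) > 0 {
		n := chunkSize
		if len(data) < n {
			n = len(data)
		}
		fmt.Fprintf(&buf, "%x\r\n", n) // hex chunk length
		buf.Write(data[:n])
		buf.WriteString("\r\n")
		data = data[n:]
	}
	buf.WriteString("0\r\n") // zero-length chunk ends the payload
	if len(trailers) == 0 {
		buf.WriteString("\r\n")
	} else {
		for k, v := range trailers { // lower-cased "name:value\n" lines
			fmt.Fprintf(&buf, "%s:%s\n", strings.ToLower(k), v)
		}
		buf.WriteString("\r\n\r\n")
	}
	return buf.Bytes()
}

func main() {
	out := frameUnsignedChunks([]byte("hello world"), 4, map[string]string{
		"X-Amz-Checksum-Crc32c": "yZRlqg==", // hypothetical checksum value
	})
	fmt.Printf("%q\n", out)
}
```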
+ prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index b1296d2b1..1c2f1dc9d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -22,23 +22,28 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" "time" + + md5simd "github.com/minio/md5-simd" ) // Reference for constants used below - // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. 
"f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" ) // Request headers to be ignored while calculating seed signature for @@ -60,7 +65,7 @@ func getSignedChunkLength(chunkDataSize int64) int64 { } // getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { if dataLen <= 0 { return 0 } @@ -73,19 +78,42 @@ func getStreamLength(dataLen, chunkSize int64) int64 { streamLen += getSignedChunkLength(remainingBytes) } streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + return streamLen } // buildChunkStringToSign - returns the string to sign given chunk data // and previous signature. -func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { +func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { stringToSignParts := []string{ streamingPayloadHdr, t.Format(iso8601DateFormat), getScope(region, t, ServiceTypeS3), previousSig, emptySHA256, - hex.EncodeToString(sum256(chunkData)), + chunkChecksum, + } + + return strings.Join(stringToSignParts, "\n") +} + +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + chunkChecksum, } return strings.Join(stringToSignParts, "\n") @@ -95,14 +123,23 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ // headers before computing the seed signature. func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + req.TransferEncoding = []string{"aws-chunked"} + } + if sessionToken != "" { req.Header.Set("X-Amz-Security-Token", sessionToken) } req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) // Set content length with streaming signature for each chunk included. 
- req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) } @@ -113,11 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte { } // buildChunkSignature - returns chunk signature for a given chunk and previous signature. -func buildChunkSignature(chunkData []byte, reqTime time.Time, region, +func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region, previousSignature, secretAccessKey string, ) string { chunkStringToSign := buildChunkStringToSign(reqTime, region, - previousSignature, chunkData) + previousSignature, chunkCheckSum) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkChecksum) signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) return getSignature(signingKey, chunkStringToSign) } @@ -156,12 +203,18 @@ type StreamingReader struct { chunkNum int totalChunks int lastChunkSize int + trailer http.Header + sh256 md5simd.Hasher } // signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { // Compute chunk signature for next header - signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, + s.sh256.Reset() + s.sh256.Write(s.chunkBuf[:chunkLen]) + chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + + signature := buildChunkSignature(chunckChecksum, s.reqTime, s.region, s.prevSignature, s.secretAccessKey) // For next chunk signature computation @@ -175,13 +228,43 @@ func (s *StreamingReader) signChunk(chunkLen int) { s.buf.Write(s.chunkBuf[:chunkLen]) // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) + if addCrLf { + s.buf.Write([]byte("\r\n")) + } // Reset chunkBufLen for next chunk read. s.chunkBufLen = 0 s.chunkNum++ } +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.sh256.Reset() + s.sh256.Write(s.chunkBuf) + chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + // Compute chunk signature + signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + // setStreamingAuthHeader - builds and sets authorization header value // for streaming signature. 
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { @@ -200,13 +283,13 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { // StreamingSignV4 - provides chunked upload signatureV4 support by // implementing io.Reader. func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time, + region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, ) *http.Request { // Set headers needed for streaming signature. prepareStreamingRequest(req, sessionToken, dataLen, reqTime) if req.Body == nil { - req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) } stReader := &StreamingReader{ @@ -221,6 +304,12 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok chunkNum: 1, totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, lastChunkSize: int(dataLen % payloadChunkSize), + sh256: sh256, + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil } // Add the request headers required for chunk upload signing. @@ -272,7 +361,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { (s.chunkNum == s.totalChunks-1 && s.chunkBufLen == s.lastChunkSize) { // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) + s.signChunk(s.chunkBufLen, true) break } } @@ -289,7 +378,11 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { } // Sign the chunk and write it to s.buf. - s.signChunk(0) + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addSignedTrailer(s.trailer) + } break } return 0, err @@ -302,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { // Close - this method makes underlying io.ReadCloser's Close method available. func (s *StreamingReader) Close() error { + if s.sh256 != nil { + s.sh256.Close() + s.sh256 = nil + } return s.baseReadCloser.Close() } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index cf7921d1f..fa4f8c91e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -162,11 +162,12 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func preStringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -189,11 +190,12 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func stringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. 
@@ -281,8 +283,9 @@ var resourceList = []string{ // From the Amazon docs: // // CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { // Save request URL. requestURL := req.URL diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index a12608ebb..34914490c 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -42,7 +42,6 @@ const ( ServiceTypeSTS = "sts" ) -// // Excerpts from @lsegal - // https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. // @@ -57,7 +56,6 @@ const ( // * Accept-Encoding // Some S3 servers like Hitachi Content Platform do not honor this header for signature // calculation. -// var v4IgnoredHeaders = map[string]bool{ "Accept-Encoding": true, "Authorization": true, @@ -177,12 +175,13 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // getCanonicalRequest generate a canonical request of style. // // canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// +// +// \n +// \n +// \n +// \n +// \n +// func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ @@ -264,11 +263,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4STS - signature v4 for STS request. func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) } // Internal function called for different service types. -func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -285,6 +284,15 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati req.Header.Set("X-Amz-Security-Token", sessionToken) } + if len(trailer) > 0 { + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.TransferEncoding = []string{"aws-chunked"} + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + } + hashedPayload := getHashedPayload(req) if serviceType == ServiceTypeSTS { // Content sha256 header is not sent with the request @@ -322,11 +330,22 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati auth := strings.Join(parts, ", ") req.Header.Set("Authorization", auth) + if len(trailer) > 0 { + // Use custom chunked encoding. 
+ req.Trailer = trailer + return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC()) + } return &req } // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) +} + +// SignV4Trailer sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go index b54fa4c77..333f1aa25 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -19,10 +19,9 @@ package signer import ( "crypto/hmac" + "crypto/sha256" "net/http" "strings" - - "github.com/minio/sha256-simd" ) // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go index d7c65af5a..98ae17efa 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go @@ -1,5 +1,6 @@ /* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +21,8 @@ import ( "encoding/xml" "io" "net/url" + "regexp" + "sort" "strings" "unicode/utf8" ) @@ -63,8 +66,17 @@ const ( maxTagCount = 50 ) +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +// borrowed from this article and also testing various ASCII characters following regex +// is supported by AWS S3 for both tags and values. 
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`) + func checkKey(key string) error { - if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { + if len(key) == 0 { + return errInvalidTagKey + } + + if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) { return errInvalidTagKey } @@ -72,8 +84,10 @@ func checkKey(key string) error { } func checkValue(value string) error { - if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") { - return errInvalidTagValue + if value != "" { + if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) { + return errInvalidTagValue + } } return nil @@ -136,11 +150,26 @@ type tagSet struct { } func (tags tagSet) String() string { - vals := make(url.Values) - for key, value := range tags.tagMap { - vals.Set(key, value) + if len(tags.tagMap) == 0 { + return "" } - return vals.Encode() + var buf strings.Builder + keys := make([]string, 0, len(tags.tagMap)) + for k := range tags.tagMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + keyEscaped := url.QueryEscape(k) + valueEscaped := url.QueryEscape(tags.tagMap[k]) + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(keyEscaped) + buf.WriteByte('=') + buf.WriteString(valueEscaped) + } + return buf.String() } func (tags *tagSet) remove(key string) { @@ -175,7 +204,7 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error { } func (tags tagSet) toMap() map[string]string { - m := make(map[string]string) + m := make(map[string]string, len(tags.tagMap)) for key, value := range tags.tagMap { m[key] = value } @@ -188,6 +217,7 @@ func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { Tags []Tag `xml:"Tag"` }{} + tagList.Tags = make([]Tag, 0, len(tags.tagMap)) for key, value := range tags.tagMap { tagList.Tags = append(tagList.Tags, Tag{key, value}) } @@ -213,7 +243,7 @@ func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { return errTooManyTags } - m := map[string]string{} + m := make(map[string]string, len(tagList.Tags)) for _, tag := range tagList.Tags { if _, found := m[tag.Key]; found { return errDuplicateTagKey @@ -311,14 +341,49 @@ func ParseObjectXML(reader io.Reader) (*Tags, error) { return unmarshalXML(reader, true) } +// stringsCut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (tags *tagSet) parseTags(tgs string) (err error) { + for tgs != "" { + var key string + key, tgs, _ = stringsCut(tgs, "&") + if key == "" { + continue + } + key, value, _ := stringsCut(key, "=") + key, err1 := url.QueryUnescape(key) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + value, err1 = url.QueryUnescape(value) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + if err = tags.set(key, value, true); err != nil { + return err + } + } + return err +} + // Parse decodes HTTP query formatted string into tags which is limited by isObject. // A query formatted string is like "key1=value1&key2=value2". 
func Parse(s string, isObject bool) (*Tags, error) { - values, err := url.ParseQuery(s) - if err != nil { - return nil, err - } - tagging := &Tags{ TagSet: &tagSet{ tagMap: make(map[string]string), @@ -326,10 +391,8 @@ func Parse(s string, isObject bool) (*Tags, error) { }, } - for key := range values { - if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { - return nil, err - } + if err := tagging.TagSet.parseTags(s); err != nil { + return nil, err } return tagging, nil diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 7bf560304..31b340dcf 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -25,19 +25,18 @@ import ( ) // expirationDateFormat date format for expiration key in json policy. -const expirationDateFormat = "2006-01-02T15:04:05.999Z" +const expirationDateFormat = "2006-01-02T15:04:05.000Z" // policyCondition explanation: // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html // // Example: // -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } type policyCondition struct { matchType string condition string @@ -98,10 +97,8 @@ func (p *PostPolicy) SetKey(key string) error { // SetKeyStartsWith - Sets an object name that an policy based upload // can start with. +// Can use an empty value ("") to allow any key. func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return errInvalidArgument("Object prefix is empty.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$key", @@ -172,10 +169,8 @@ func (p *PostPolicy) SetContentType(contentType string) error { // SetContentTypeStartsWith - Sets what content-type of the object for this policy // based upload can start with. +// Can use an empty value ("") to allow any content-type. func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { - if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" { - return errInvalidArgument("No content type specified.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$Content-Type", @@ -286,10 +281,14 @@ func (p *PostPolicy) SetUserData(key string, value string) error { } // addNewPolicy - internal helper to validate adding new policies. +// Can use starts-with with an empty value ("") to allow any content within a form field. 
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + if policyCond.matchType == "" || policyCond.condition == "" { return errInvalidArgument("Policy fields are empty.") } + if policyCond.matchType != "starts-with" && policyCond.value == "" { + return errInvalidArgument("Policy value is empty.") + } p.conditions = append(p.conditions, policyCond) return nil } diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index 3a4cacfe8..8bf2e5baa 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -28,8 +28,10 @@ var awsS3EndpointMap = map[string]string{ "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", @@ -38,6 +40,7 @@ var awsS3EndpointMap = map[string]string{ "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", @@ -45,6 +48,7 @@ var awsS3EndpointMap = map[string]string{ "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com", + "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com", } // getS3Endpoint get Amazon S3 endpoint based on the bucket location. 
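Editor's note: the post-policy change above relaxes validation so that `starts-with` conditions may carry an empty value. A minimal sketch of what this enables; the endpoint, bucket name, and credentials are placeholders, not part of this patch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		panic(err)
	}

	policy := minio.NewPostPolicy()
	policy.SetBucket("my-bucket")
	policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
	// Previously rejected as empty arguments; with this patch an empty
	// prefix means "any object key" and "any content type" respectively.
	policy.SetKeyStartsWith("")
	policy.SetContentTypeStartsWith("")

	u, formData, err := client.PresignedPostPolicy(context.Background(), policy)
	if err != nil {
		panic(err)
	}
	fmt.Println(u, formData)
}
```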
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go index a88477b73..1bff66462 100644 --- a/vendor/github.com/minio/minio-go/v7/transport.go +++ b/vendor/github.com/minio/minio-go/v7/transport.go @@ -23,7 +23,6 @@ package minio import ( "crypto/tls" "crypto/x509" - "io/ioutil" "net" "net/http" "os" @@ -73,7 +72,7 @@ var DefaultTransport = func(secure bool) (*http.Transport, error) { } if f := os.Getenv("SSL_CERT_FILE"); f != "" { rootCAs := mustGetSystemCertPool() - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err == nil { rootCAs.AppendCertsFromPEM(data) } diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index f32f84ab0..9389a7faf 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -20,6 +20,7 @@ package minio import ( "context" "crypto/md5" + fipssha256 "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/xml" @@ -27,7 +28,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -39,6 +39,7 @@ import ( "time" md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/sha256-simd" ) @@ -140,7 +141,7 @@ func closeResponse(resp *http.Response) { // Without this closing connection would disallow re-using // the same connection for future uses. // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } } @@ -510,6 +511,23 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") } +// supportedQueryValues is a list of query strings that can be passed in when using GetObject. +var supportedQueryValues = map[string]bool{ + "partNumber": true, + "versionId": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "response-content-language": true, + "response-content-type": true, + "response-expires": true, +} + +// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized. +func isStandardQueryValue(qsKey string) bool { + return supportedQueryValues[qsKey] +} + var ( md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} @@ -520,6 +538,9 @@ func newMd5Hasher() md5simd.Hasher { } func newSHA256Hasher() md5simd.Hasher { + if encrypt.FIPS { + return &hashWrapper{Hash: fipssha256.New(), isSHA256: true} + } return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} } @@ -627,3 +648,38 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { } return false } + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. 
+func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 000000000..a1338d685 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 000000000..eba7d11cf --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,75 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which concurrently runs all of the +actors, waits until the first actor exits, invokes the interrupt functions, and +finally returns control to the caller only once all actors have returned. This +general-purpose API allows callers to model pretty much any runnable task, and +achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... 
+g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go new file mode 100644 index 000000000..ef93495d3 --- /dev/null +++ b/vendor/github.com/oklog/run/actors.go @@ -0,0 +1,38 @@ +package run + +import ( + "context" + "fmt" + "os" + "os/signal" +) + +// SignalHandler returns an actor, i.e. an execute and interrupt func, that +// terminates with SignalError when the process receives one of the provided +// signals, or the parent context is canceled. +func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { + ctx, cancel := context.WithCancel(ctx) + return func() error { + c := make(chan os.Signal, 1) + signal.Notify(c, signals...) + select { + case sig := <-c: + return SignalError{Signal: sig} + case <-ctx.Done(): + return ctx.Err() + } + }, func(error) { + cancel() + } +} + +// SignalError is returned by the signal handler's execute function +// when it terminates due to a received signal. +type SignalError struct { + Signal os.Signal +} + +// Error implements the error interface. +func (e SignalError) Error() string { + return fmt.Sprintf("received signal %s", e.Signal) +} diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 000000000..832d47dd1 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. 
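+	// (Editor's note: errors is buffered to len(g.actors), so each actor
+	// goroutine can deliver its result without blocking, regardless of when
+	// Run drains the channel.)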
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go index 3e5969064..d7969d74d 100644 --- a/vendor/github.com/pkg/browser/browser.go +++ b/vendor/github.com/pkg/browser/browser.go @@ -30,7 +30,7 @@ func OpenFile(path string) error { // OpenReader consumes the contents of r and presents the // results in a new browser window. func OpenReader(r io.Reader) error { - f, err := ioutil.TempFile("", "browser") + f, err := ioutil.TempFile("", "browser.*.html") if err != nil { return fmt.Errorf("browser: could not create temporary file: %v", err) } @@ -41,12 +41,7 @@ func OpenReader(r io.Reader) error { if err := f.Close(); err != nil { return fmt.Errorf("browser: caching temporary file failed: %v", err) } - oldname := f.Name() - newname := oldname + ".html" - if err := os.Rename(oldname, newname); err != nil { - return fmt.Errorf("browser: renaming temporary file failed: %v", err) - } - return OpenFile(newname) + return OpenFile(f.Name()) } // OpenURL opens a new browser window pointing to url. @@ -58,6 +53,5 @@ func runCmd(prog string, args ...string) error { cmd := exec.Command(prog, args...) cmd.Stdout = Stdout cmd.Stderr = Stderr - setFlags(cmd) return cmd.Run() } diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go index 6dff0403c..8507cf7c2 100644 --- a/vendor/github.com/pkg/browser/browser_darwin.go +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -1,9 +1,5 @@ package browser -import "os/exec" - func openBrowser(url string) error { return runCmd("open", url) } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go index 8cc0a7f53..4fc7ff076 100644 --- a/vendor/github.com/pkg/browser/browser_freebsd.go +++ b/vendor/github.com/pkg/browser/browser_freebsd.go @@ -12,5 +12,3 @@ func openBrowser(url string) error { } return err } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go index ab9b4f6bd..d26cdddf9 100644 --- a/vendor/github.com/pkg/browser/browser_linux.go +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -19,5 +19,3 @@ func openBrowser(url string) error { return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound} } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_netbsd.go b/vendor/github.com/pkg/browser/browser_netbsd.go new file mode 100644 index 000000000..65a5e5a29 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_netbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from pkgsrc(7)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go 
b/vendor/github.com/pkg/browser/browser_openbsd.go index 8cc0a7f53..4fc7ff076 100644 --- a/vendor/github.com/pkg/browser/browser_openbsd.go +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -12,5 +12,3 @@ func openBrowser(url string) error { } return err } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go index 5eb17b013..7c5c17d34 100644 --- a/vendor/github.com/pkg/browser/browser_unsupported.go +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -1,15 +1,12 @@ -// +build !linux,!windows,!darwin,!openbsd,!freebsd +// +build !linux,!windows,!darwin,!openbsd,!freebsd,!netbsd package browser import ( "fmt" - "os/exec" "runtime" ) func openBrowser(url string) error { return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) } - -func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go index a2b30d39b..63e192959 100644 --- a/vendor/github.com/pkg/browser/browser_windows.go +++ b/vendor/github.com/pkg/browser/browser_windows.go @@ -1,13 +1,7 @@ -//go:generate mkwinsyscall -output zbrowser_windows.go browser_windows.go -//sys ShellExecute(hwnd int, verb string, file string, args string, cwd string, showCmd int) (err error) = shell32.ShellExecuteW package browser -import "os/exec" -const SW_SHOWNORMAL = 1 +import "golang.org/x/sys/windows" func openBrowser(url string) error { - return ShellExecute(0, "", url, "", "", SW_SHOWNORMAL) -} - -func setFlags(cmd *exec.Cmd) { + return windows.ShellExecute(0, nil, windows.StringToUTF16Ptr(url), nil, nil, windows.SW_SHOWNORMAL) } diff --git a/vendor/github.com/pkg/browser/zbrowser_windows.go b/vendor/github.com/pkg/browser/zbrowser_windows.go deleted file mode 100644 index cbb25ba63..000000000 --- a/vendor/github.com/pkg/browser/zbrowser_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package browser - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
-	return e
-}
-
-var (
-	modshell32 = windows.NewLazySystemDLL("shell32.dll")
-
-	procShellExecuteW = modshell32.NewProc("ShellExecuteW")
-)
-
-func ShellExecute(hwnd int, verb string, file string, args string, cwd string, showCmd int) (err error) {
-	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(verb)
-	if err != nil {
-		return
-	}
-	var _p1 *uint16
-	_p1, err = syscall.UTF16PtrFromString(file)
-	if err != nil {
-		return
-	}
-	var _p2 *uint16
-	_p2, err = syscall.UTF16PtrFromString(args)
-	if err != nil {
-		return
-	}
-	var _p3 *uint16
-	_p3, err = syscall.UTF16PtrFromString(cwd)
-	if err != nil {
-		return
-	}
-	return _ShellExecute(hwnd, _p0, _p1, _p2, _p3, showCmd)
-}
-
-func _ShellExecute(hwnd int, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int) (err error) {
-	r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
-	if r1 == 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
diff --git a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go
index ff1271793..9a6df1403 100644
--- a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go
+++ b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go
@@ -38,9 +38,9 @@ var Assets = func() http.FileSystem {
 		"/static/index.html": &vfsgen۰CompressedFileInfo{
 			name:    "index.html",
 			modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 1381,
+			uncompressedSize: 1654,
-			compressedContent: []byte("\x1f\x8b\x08\x00…"), // [gzip-compressed payload elided]
+			compressedContent: []byte("\x1f\x8b\x08\x00…"), // [gzip-compressed payload elided]
 		},
 		"/static/lib": &vfsgen۰DirInfo{
 			name: "lib",
@@ -152,9 +152,9 @@ var Assets = func() http.FileSystem {
 		"/static/script.js": &vfsgen۰CompressedFileInfo{
 			name:    "script.js",
 			modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
-			uncompressedSize: 108434,
+			uncompressedSize: 110123,
-			compressedContent: []byte("\x1f\x8b\x08\x00…"), // [gzip-compressed payload elided]
xc5\x65\x37\xa3\xd7\xe2\x95\x2b\x37\x80\x4c\xa1\x7a\x8a\x75\x8d\xc8\x3b\xcc\xa7\xd4\x7c\xee\x1d\x86\xc5\xf3\xd3\x5c\x82\x3e\x52\x33\x67\xe6\xa7\x70\xeb\x87\x70\xe7\x3f\xcd\x36\x93\x8e\x05\x71\xbe\xb3\xf1\xe4\xf3\xc1\x93\x37\x56\x4e\x9b\x6b\xb1\x6a\xdb\x5b\x1f\x6d\x54\x3d\x1c\xe6\x6b\x47\x76\xde\xec\x5e\xf8\x29\x1c\xf9\x21\xb9\x0f\x7d\x0f\x7e\xfa\x0c\xd4\x8b\xa4\xf0\x0c\x67\x7a\x86\x2a\x4f\x42\x6d\x46\x29\x3b\x35\x74\x43\x7c\x5f\xc3\xb0\xc0\x10\xba\x21\xd9\xc9\x76\xcf\x2b\x8a\x4b\xe8\xfe\x04\x06\x31\x84\xae\x50\x50\x82\xa4\x06\x6d\xe8\x26\x6e\x42\xee\x67\x7e\x6c\x30\x2c\x72\xea\x5b\xc7\x22\x77\x93\x96\x81\x2f\xcb\x2b\x51\xd1\x2e\x5a\x04\x2e\x50\x43\x5a\x4e\x48\x58\x25\x84\x01\x75\x7f\x42\x08\x69\xb6\xb8\x8f\x04\x12\xf0\x18\x52\xd5\xc1\x21\x1c\x29\xd1\xb4\x38\xaa\xd2\x60\x42\x1c\xee\x05\x29\x60\x73\xef\xbc\xee\x94\xfb\xd0\xef\x58\xfd\x26\xec\xb5\xf3\xcc\x92\x4a\x5b\xd6\x96\x7c\xcd\x1b\x6b\x84\x9d\x52\x52\x8d\x37\x8c\x44\xae\x30\x4c\xa5\xd6\x56\x9a\xb1\x55\x00\x09\x89\x8c\xf2\xac\x86\x20\xe9\x9d\x29\x7a\xa4\x7b\x0d\x91\x9b\x40\x8c\xfd\x47\xfa\x2d\x8a\x5c\x49\x04\x44\x6e\x4a\x62\xf0\x0e\x91\x92\x6f\x89\x3b\xc3\x85\x9a\x68\xaa\x0f\xc0\xcb\xaa\xc7\xfe\xe3\xe5\x82\x48\xd5\x95\xa8\x65\x25\x71\x8f\xdc\x84\x08\x83\x6a\x33\x22\xec\xdb\x38\x30\x6c\x21\xd5\xaa\x56\x4b\x6f\xc4\x22\xdd\x2c\xfc\xcf\x82\x01\x53\xcd\xc5\x63\x22\x4d\x9b\xf6\xb4\xfe\x55\x08\xef\x3e\x91\x99\x52\xd9\x37\x4a\x65\xbf\x54\x13\x55\xc5\x7d\xd3\x81\xad\x0e\x84\x20\x56\xb8\x86\xcc\x1c\xb9\x21\xd2\x65\xdd\x5c\x49\x0d\x47\xd1\xb8\x7f\x12\xf7\x59\x52\x2c\x3f\x13\xe2\x75\x27\x87\x37\xf9\x42\x36\xc9\xd7\x9e\x91\xb2\xfc\xc9\xb8\x77\x73\x31\xb9\xf4\xd5\x1f\x2d\xe1\x5b\x2d\xda\x42\x66\xe2\xeb\xa9\x40\x0f\xc9\xa0\xd9\x1c\x1c\x92\x61\xb3\x89\x52\xc2\xd1\xf6\xc5\xe4\x12\x46\xd9\xd8\x0e\xa1\xe8\x83\x5a\x0f\x14\x5d\xd0\xa5\x64\xb8\x28\xfa\x23\xb7\xcd\xc0\x03\xe1\x06\x60\xe7\x55\xb9\x12\x4b\x1b\x22\xda\x55\x97\xab\xc8\xdc\x47\x16\xb3\xc1\x53\xf5\x2c\xed\xa0\xc7\xa7\x62\x95\xa2\x2c\x6c\x45\xb9\x6e\x2b\x4b\x60\xb5\xa8\x94\xa5\x3d\xfe\x42\x0f\x5e\xe9\x24\x22\xdc\xda\x5a\x04\x49\x6e\x34\xc3\xe2\xae\x5c\x31\x5e\xf3\x39\x5a\xf5\xda\x78\x99\xea\x63\xdb\x65\xcd\xa6\xdc\x22\x84\x37\x9b\xb5\x2d\x4b\x09\xdc\x3a\x1d\xad\x77\xe2\x13\x10\x6e\x5a\x8b\xd3\xcf\x9c\x68\x6e\xaa\xbe\x63\xa8\x6f\xe4\xf3\x1c\xe9\x33\x2a\x29\xf2\x80\x17\x3a\x8f\x05\x5d\xa8\xf5\xa6\x6b\xdd\xa4\xae\xcc\xaf\x22\xbd\xb7\xea\xa5\x7b\x45\x84\x9b\xf8\xab\x3e\x91\xfb\x2b\x5f\x35\x61\xea\x0b\x37\x5d\xe4\x55\x1f\xf8\xf6\xd9\xac\x24\xcb\xf0\x22\xdd\x48\x87\x45\xe6\x1e\x0c\xd3\x23\xdc\x62\xfb\x0b\xe9\xde\x94\x2a\x1f\xcf\x43\x4d\x4b\x85\x09\x69\x7c\x58\xab\x7b\x95\x82\x8c\xa8\xa2\x5d\x56\xac\x55\x59\x4d\x11\x4f\x98\x90\x4f\xd8\x20\x16\x0c\xdd\x08\x94\xea\x58\x4c\x37\xc5\x40\xeb\xf5\x3c\x56\x26\xcc\x56\x56\x03\x2e\x9d\x08\xf6\xe6\xb3\x45\xb6\xea\x67\x23\xc0\xa5\x7b\x64\x1b\x2b\x0d\x6f\x4b\x2d\x40\x42\xfb\xd5\xd6\x16\x0e\xdd\x84\x98\x59\xe0\xce\x8a\x21\x7b\xb4\x8a\x5d\x73\x2f\x89\xe9\xc8\xea\x97\x68\x60\xbb\x1b\x24\x29\xdc\xc3\xcf\xb2\x6d\xf8\xe7\x82\x0e\xb5\x9f\xb8\x48\xb3\x63\xf7\x4f\xae\x3e\x5f\xb0\x4b\xf7\xa8\xfb\x82\x2b\xcb\x92\x10\x92\xba\x61\x2f\x75\x13\x5f\xf5\x97\xfb\x53\x77\x97\x15\x25\xb5\x40\xd2\xbd\x33\xf9\x00\x8a\x06\x94\x49\x31\x88\x74\x6f\xb3\xa0\x87\xd4\x0e\x7a\xc8\xd6\xf9\xf4\x82\x2a\x4d\x37\x74\x8f\x20\x26\x3b\xda\x11\x11\xf6\x62\x53\x57\x9c\xd5\xd5\xad\x0d\x5b\x0c\x95\xa1\x0e\x5d\x71\x89\x17\xac\xd9\xd4\x51\x05\xcc\x0a\xba\x31\x29\x1b\xaa\x47\x47\x84\x9b\x20\x8e\xbb\x7d\xe3\xd9\xf4\x47\xa8\xe3\xe1\xc5\x02\xa5\x3a\x13\x09\xd1\x53\x14\x71\xc2\x8a\xf6\x59\x61\xa0\xaf\x44\x16\x5d\x69\x72
\x4b\x29\x96\x3f\xbf\x9b\xb2\x9c\x35\x3e\x71\xc4\x5d\xc9\x6e\xe5\xd3\x98\x4b\xc6\xcd\xd1\xc2\xce\xd6\x1a\x50\xc7\x29\x3b\x29\xcf\x61\x40\x73\x17\x5e\x02\xf5\x93\xa3\xd6\xc1\x51\x41\x3e\xa2\x19\x8a\x05\xa4\x2e\xa7\x13\x06\xa9\xab\x2d\x44\xbd\x23\x52\x1e\xe6\xe7\xae\xa4\xc3\x13\x3a\x61\xae\x8c\xdf\xc4\x33\x26\x9e\xd2\x84\x21\x0c\x21\x39\xd1\x96\x45\xd9\x81\xc0\x4a\xef\x8f\xae\x2b\x24\x1f\xd1\x2b\x81\xe2\x0b\x76\x89\x21\x2c\xfa\xf3\x0e\x7d\xd5\x27\x60\x21\xac\xc4\x69\x08\xe0\x20\xad\x0d\x60\x1d\xc6\xa8\x53\x84\x1c\xa9\x3f\xfb\xea\x8f\x15\x08\xa9\x0f\xca\xe7\xd1\xfb\xe1\x43\x48\x49\xa8\xbb\x07\x28\x79\x25\xac\x28\x98\x0f\x95\x58\x8e\xc2\x4d\xce\xf4\x44\x23\x13\xa5\xa8\x0a\x35\x62\x57\x02\xa9\xb5\x4b\xad\x1e\xca\xe4\x5b\x94\x9b\xfc\xa7\x95\xed\x79\x43\x26\xab\x92\xc9\xd6\x92\xc9\x80\x5b\xa1\x12\x33\xed\x1a\x9f\x99\xe3\x05\x9a\xec\x50\x4d\x2d\x19\xc9\x31\x83\x58\x3d\x06\x71\xff\x0e\x22\xd5\x84\x78\x7d\x13\x9e\x73\x92\x76\x4d\x3b\xa8\x4e\x27\x40\xbe\x72\xe4\xa8\xa2\x0e\x46\x27\x18\x09\x37\x78\x94\xb5\x2e\x52\xeb\x4e\xac\x5a\x17\x43\x04\x8a\xad\x21\x22\x12\x9e\x73\xe2\x41\xa8\x9d\x2a\xe1\x4e\xb3\x89\x72\x22\x88\x3e\x69\xbd\x83\x4d\xf3\xe1\xbb\x58\x19\x94\x23\xd8\x8f\x94\x25\xf2\x88\x47\x13\xbd\x03\xf0\x5c\xd0\x09\xeb\xad\x7c\x5b\x89\x29\xb2\x62\xa9\x38\x74\xd8\xee\x83\x03\x0f\x5b\xd1\x3c\x1f\x04\x32\x9e\x58\x24\xb3\xd3\x35\x76\xe4\x35\x45\xf8\x3e\xd5\xda\x49\xda\xf3\x7c\xf4\x5d\x20\x8a\x41\xef\xb4\x76\x8a\x49\x56\x3b\xe5\x47\x38\x88\x1e\xd2\x30\x5a\xfc\x68\x65\xa5\xa3\xd4\x35\xcf\xfc\x34\x48\x52\xb2\x63\x47\xe8\x7f\x11\xe5\xe6\xfc\x19\x53\x5d\x33\x8e\x43\xdd\x22\x77\xa4\x16\x61\x97\xce\xe7\x23\xd4\xc1\x8b\xb5\xb1\x92\x2c\x86\x2b\x56\x09\x27\xc3\xf7\xa2\xd9\x1c\x45\x89\x8c\xc5\x9d\x3b\x8c\x91\xc0\xc0\x91\xc9\x00\xa1\x5b\x7a\xbe\x76\x17\x78\x35\xb6\x1c\x95\x32\x44\xce\x24\x95\x4c\xfb\xcc\x1d\xb0\xf0\xc2\x89\x58\x9b\x9d\x61\x33\xd2\x4c\x07\x58\x87\xf7\xbe\xee\xce\xb7\xbd\xf3\x0b\x58\xb1\x63\xe2\x57\x83\x95\xe1\xe3\x6a\xb6\x32\xbb\x05\x3d\xf3\x8f\x7f\x22\xec\x7d\xff\x6a\x10\xcb\x29\x47\xb5\x60\x3d\x3b\xb5\x29\xbe\x7f\xc5\x51\xaa\x83\xcb\xca\xfc\xa6\xcb\x3b\x40\xa2\xba\x03\xa4\x4f\x62\x5b\x84\xca\x35\x7b\x3f\x26\x9c\x6f\x55\x0c\x45\x76\x3e\xc8\x3e\x21\x25\x70\xef\x56\xea\x5d\x7c\xff\x58\xda\x7b\xf1\x6f\x32\x5e\x5f\x11\x99\x2b\xf0\xfd\x77\x61\x8f\x8b\x09\xa2\xcf\x03\xe0\xdc\x21\x93\xd9\x26\xed\x93\xbb\x57\x7d\x35\x57\x04\xe2\xbd\x63\x8e\x94\x4c\xc3\xfe\x35\x47\x63\xbd\xe1\x67\x26\xb1\x8e\x08\x16\xd5\x88\xe0\x32\x08\xef\x4d\x5d\xb4\xe4\xab\xd5\x85\xb8\x44\x18\x5e\x6d\x8a\x09\x96\x64\x39\x98\xe5\xa3\x70\x93\x50\xc4\xe3\xb1\x86\x84\x57\x8b\x7a\x50\x65\xb5\x65\x3a\x8c\x52\x22\x6c\x9d\x47\x90\x1b\xe2\x3d\xd6\x93\x9b\xd5\xfa\x86\x0d\x94\x19\x96\xff\x3c\x8f\xa7\x44\x66\x8d\x50\xb8\x3f\x0b\xf2\xab\xdc\x35\x85\xc6\x1b\x12\xd1\xa2\x87\xa5\xd7\x2f\x26\x5e\x37\x6c\x36\xe3\x43\x6a\x16\xd1\x48\x69\x33\x65\x02\x00\x65\xde\x13\x7e\x11\xb7\x5a\x7a\x23\xec\x42\xb4\x5a\x97\xcd\x26\xea\x78\x84\x44\x3d\x24\x5b\x2d\x60\xa4\x83\x7d\xc4\x5a\x2d\xd0\x19\x22\x08\x41\x07\xbb\x7b\x8f\x1e\x35\x23\xdc\xab\x95\xf3\x3b\xe5\xfe\xf7\x77\x14\xf6\x84\xdf\xee\x64\x11\x5e\xf0\x73\x53\xc4\xd9\x61\x61\x16\x55\xab\x90\x55\x4a\x71\x8f\x23\xe9\x26\x69\x90\x48\x65\x98\xec\x60\xdc\x13\xad\x1d\xbf\xdd\xf1\x39\x92\x17\xe2\x12\xf7\x9c\xbf\xb8\x76\xd7\x5e\x88\xcb\x5e\x7b\xc7\x17\xad\x8e\xfa\xda\xee\x2c\x30\xbc\x16\x9b\xd2\x3b\x54\xea\x51\xda\xcd\x02\xc3\x37\xb1\x32\xc3\x42\x97\x97\x56\x18\xcf\x15\x39\x59\x4d\xab\x60\xf6\xaf\xe5\xe1\xde\xa3\xf9\x7c\xff\x61\x99\x9c\x8d\x97\x5a\x15\x86\x97\x62\x63\xe6\x0c\xaf\x5b\xf6\x4b\x57\x94\xca\x69\x8d\xd8\xf6\xde\x2
3\xbd\x3d\x77\xe8\xcd\xe7\xfc\x90\xa4\x99\x27\x8e\x11\xfe\x07\x6b\xa5\x8b\x22\x26\x47\x98\x71\x78\x2f\x36\xa4\x8d\xf0\x56\xb6\x8d\xad\x6a\xdb\xde\xa3\x7f\xb2\xf9\x9c\xfd\x73\xff\x21\x8e\x06\xe8\x60\xdf\xfc\x7a\xe8\x69\xfd\x90\x1d\x3e\x7e\x38\x9f\x77\xbc\x9d\x43\x96\x91\x23\x49\xe7\xe0\x0f\xd9\x62\xed\x47\x0f\x8d\x5f\xaf\x78\xb1\xbf\xdf\xad\xbe\xd8\x7b\x54\x12\xcd\x75\xa8\x61\xf7\x57\xcc\x9f\x5a\x39\x19\x34\x43\xd3\x43\xaf\x97\xcf\x00\x9f\xb6\x78\xe9\xf7\x0e\x33\xe7\x4c\x5c\x9b\x06\xad\x16\xee\x2a\xa6\x8f\x7b\x88\x91\x0e\x48\x93\x29\x66\x89\xe9\x63\xdc\x6c\x2a\xd8\x45\xc1\xe6\x34\xe3\x70\x93\x99\xa7\xd2\xbb\x76\xb4\x61\x4d\x50\x9a\xb8\x0e\x4e\x38\x9b\x35\xbe\xbc\x7d\xf3\x52\xca\xe9\x07\xa3\x86\xa8\x91\x83\xbb\x01\x92\x84\x62\x65\x2d\x2f\xef\x41\x4f\x45\x3c\x14\x2c\x49\x9c\x8a\x44\xc9\xdb\xf8\x34\x9e\x4c\x53\x49\x83\x31\x6b\x36\x5f\xa9\xf9\x42\xd1\x7d\x48\x7d\xa5\x0c\xd0\x3e\xeb\x43\x18\xf8\xdc\x95\xb1\xa4\x63\xb3\x1a\xac\x08\x32\x70\x98\x10\xb1\x70\x2a\x31\x7f\xe8\x9a\xa3\xe9\x60\x6d\x09\x69\xd4\xa3\xe5\x32\xb3\xf5\x65\x14\x41\xb5\x02\xab\xcc\xbc\x15\xc7\x32\xc2\x03\xbd\xeb\x9f\x4c\x63\x9e\xb0\x8f\x1f\xde\x40\xf0\xde\xbf\x0f\x07\x3e\x77\x13\x49\x65\x9a\x40\xf8\xaa\x78\x3e\x67\xb7\x72\x01\xe1\x6c\xc5\xf1\x99\x20\x36\xc9\x4f\xca\x04\x6f\xe5\x54\xe0\x59\xfa\x18\xe7\x2f\xf1\x17\x77\x30\xac\xce\x64\x03\x14\x42\x63\x94\x28\x13\x2e\x67\x42\xc7\x6f\x38\xb8\xeb\x1d\xc6\x5a\x6f\x0b\x33\x89\x15\xf1\x21\xf2\x20\x56\x1a\xb4\xfd\x6a\xa7\x15\x63\x10\xe4\x0e\xbd\x1a\xd8\xb9\xbf\xcb\x35\xe2\x56\xa2\x3b\x65\xd5\xf6\x68\xcb\x01\x9d\xb1\x81\xfa\x14\x2f\xf4\x11\xd9\x22\xf4\x0c\x71\xb5\xbc\x1e\x8d\xc7\x1f\xb2\x5e\x79\xc9\x68\x9f\x89\x04\x61\x0c\xc1\x23\xab\xb7\xcc\x71\x2e\xbd\x37\x69\xfa\xe7\x70\xc7\xf3\xe6\xf3\x5d\xcf\x3b\x24\xf9\x2b\x5c\x88\x45\xa5\x9a\x93\xb2\xb0\xea\x4b\xb8\xe6\x68\x34\x50\xeb\x74\x57\x10\x81\x64\x4d\x6b\x38\x36\xb1\x7f\x3e\x5a\x5b\x78\x86\x26\x03\x93\xab\x4c\x2d\x9f\x88\x43\xea\x86\x53\x97\x6a\x95\x52\x8a\xbb\x7b\xee\xc6\x53\xc6\x51\xea\x86\xaf\xd5\xa7\x03\xd8\xf2\x96\x33\x5b\x68\xde\x1a\x0e\x14\xd4\x81\x42\xb3\xb5\x3e\x13\x4e\x38\xeb\x4a\x37\xe8\x9a\x84\x74\x3a\x88\x24\x9b\x69\xa6\x8b\xd4\x0c\x31\x89\xd3\xdc\x40\x59\xda\x05\xb9\x77\x53\x2d\xf8\xa6\x6e\x00\xdc\x9d\x45\x72\xf4\x54\xb0\x3e\xe3\x32\xa2\xe3\x44\x7d\x78\xac\x66\xa9\x70\xc3\x0e\x56\x16\xb3\x9b\xcd\x00\xf5\xa5\xe3\x52\xd3\xb0\x3c\x75\x41\xea\x06\x8f\xca\x28\x8c\x84\xf1\x3e\xba\x1d\x20\x86\x7b\x68\x05\x3d\x4e\x66\x3e\xb7\x15\x05\x8e\x39\x48\xcf\xdc\x00\xfb\xfa\xc9\x56\x42\x5c\x1a\xc4\x42\x22\xbc\xa8\x6b\x3b\xd5\x50\x53\x0f\x02\x5f\xba\x01\xd0\xca\x14\xe0\x44\x89\x86\xd2\xf9\x36\x43\xc7\x83\x22\x3d\xbd\xa6\xfb\xad\x58\x93\x32\xc7\x19\x3a\x5d\xee\x86\x6f\x9a\x4d\x24\x5b\xc4\x99\x38\x6a\x7e\x87\x61\xf6\x33\x72\xcc\x38\x96\xec\xfb\x81\x0d\x8f\x6f\xa7\x9a\xaa\xe5\x91\x3c\x96\x4a\xb3\x7e\xb2\x5e\xe7\xe7\xe9\x78\xac\x0d\xbe\x49\x56\x72\x73\xb6\x5a\xc8\x76\x11\x3d\x35\x33\x4d\xa8\x49\x9e\x1e\x08\x62\xd2\xee\x74\x69\xab\x75\xc8\x9b\x4d\x1d\x0e\xcb\x6e\x59\x88\x42\x8c\x9b\xcd\x78\xcb\x86\xec\x96\x08\xa3\x22\x82\xab\xdd\x81\x41\x16\xae\x12\xa9\xe9\x1d\xe5\x7e\x7e\xc2\x2e\xa2\xcb\xee\xe0\xa2\xdd\x8e\x2e\x49\xa0\x14\xe7\x40\xab\xcd\x69\x9e\x21\xf0\x3a\x00\x76\xe1\x5d\x02\x33\x22\x02\x28\x3c\xd3\x19\x01\x4c\x30\x4a\x5e\x69\x31\x9b\xcb\x57\x44\x42\x96\xbb\x50\xda\xcb\x8a\x49\xc4\x95\x79\x90\xbc\x22\x95\x5e\x6e\x00\xd5\x74\xcd\x68\x80\xc2\x56\xeb\x9f\x24\x2d\xb4\x10\xcb\xf1\x42\xc5\x50\x6b\xe7\x79\x18\x47\x7b\x17\xf2\x4c\xd4\x42\x35\x52\x14\xe9\xa3\x0a\xc8\x0b\x71\xd9\x95\x17\xed\xb6\x0e\x67\xbd\x95\x88\xe9\xc6\x16\x69\xfe\x75\x73\x39\x94\xe0\x4b\x
55\xec\x5c\x42\x08\xcf\x0a\x55\x1f\xc3\xf3\xcd\x9a\x90\xcc\xc7\xd4\x1e\xcb\xca\xc8\x76\x79\x21\x93\xc3\x7c\x58\x19\xce\xf6\xcf\x8c\x06\x52\x04\xb0\x9a\x83\x41\x14\x32\x79\x8d\x71\x15\x71\xe1\xdd\xaf\xc1\x63\x0c\x2b\x07\xe6\xdd\x5a\x0b\x98\x37\x37\x65\xbf\xdb\x98\x5c\xef\x7f\x6c\x28\x29\x0e\x0f\xf9\x02\x77\x4f\xd6\xd6\x2b\xfe\xf9\x4f\xbe\x21\x89\xf2\x3f\xf5\xe7\x6e\x25\x77\x23\x5b\x77\xd4\xb2\x38\x4b\xf6\x8a\x4b\x26\x6e\xe8\xd8\xb6\x82\x5e\x71\xc4\x36\x9e\x24\x2b\x0a\x09\x5c\x3b\xeb\xfb\xc2\x8a\x6e\x6f\xcc\xd0\x53\x09\xce\x5f\xbc\xd1\x68\x34\x1c\x98\xa1\x57\xfa\x97\x03\xdc\xde\x0f\xf9\x64\x97\xb8\x43\xa7\x72\x6d\x03\x5b\x4a\xdd\xf7\x94\x24\x2b\x0a\x7f\xb5\x0b\x3f\x7e\x78\x48\x10\x27\x27\xfa\x18\x57\xb3\xc9\x0f\x49\x67\x67\xa7\x84\xfd\xd3\x82\x2d\xc0\x0e\xc9\x63\xaf\xd9\x3c\xd8\x3f\x24\x96\x3f\x94\xcb\x95\x90\xfb\x0f\x9b\xcd\xbd\x47\x15\x48\x61\x41\x1a\x62\xe6\xf3\x3f\xcd\x3f\x1a\x89\x75\x1d\x88\xac\x64\x2e\x28\xaf\xc8\xa8\xbc\x77\xa9\x75\x33\xc5\x9a\x12\xd4\x7a\xef\x38\xda\xa6\xd1\xf3\x43\x92\x37\x10\x4b\xb2\x0d\x91\x24\xa8\xe2\xa9\xb0\x0e\x32\x0b\x37\xd4\xa9\x8b\xfb\x90\xae\x63\xb3\x3b\x14\x4b\xe0\xee\x76\x4f\xfa\xa9\x8e\x9d\xa6\x6a\x84\xed\x8f\x29\xe8\x7f\x0c\x62\x63\x7e\xac\x39\x33\x68\xa2\x6a\xdb\x3b\x7a\x87\x6c\xbb\x9a\x70\x57\x69\x5e\x8a\x0c\x0e\x94\xdc\x69\xf7\x7a\x00\xd2\x0d\xe1\x0e\x45\x1a\x33\x48\x97\xe9\x74\xcb\x29\x08\x42\x41\x12\xa6\xcf\x7f\x0c\x56\x69\x8e\x0d\x5d\x68\xad\x1d\x38\x43\xa1\x04\x93\xbc\x58\x1b\x1e\x70\xa2\xb8\x08\x82\x95\xa8\xb8\xd2\xd3\x7f\x8d\x8e\xdb\x98\x12\x49\x3a\x30\x96\x64\x07\xfa\x92\x78\x70\x23\xd7\xca\x8e\x05\x86\xed\x95\xba\x6f\x91\x5f\x02\x26\x72\xd3\x59\xe3\x22\x83\x30\x86\xd1\x06\x40\xcf\x06\x1c\x6e\x00\xec\xd8\x80\xd3\x35\xa4\x65\xa7\x09\x61\xb6\xe6\xfb\x4e\xf6\xfd\x4e\x92\x6f\x70\xfb\x0b\x24\xc7\x92\xa8\x7a\x17\x70\x2d\x09\xe7\x70\x24\xc9\x17\x38\x93\x64\xca\xe1\x6a\xf5\x80\xb4\x1c\x67\x01\x4f\xe5\x7a\x8f\xe4\x57\xe0\xf0\xd9\xa4\xb0\x85\x57\x6b\xe1\x9e\xa1\x19\xfa\x04\x26\x07\x0d\x86\x53\x49\x36\x32\xed\x96\xac\x67\x88\x66\x3a\x55\xaf\x61\x58\x7d\x85\x40\x9e\x2d\xda\x66\x50\xf8\x2e\xc9\x4f\xf8\xf0\x0b\xe4\x9d\x3c\x2b\x77\xa1\x92\x67\x35\x70\x48\x89\x68\x77\x74\x0d\xa1\xd4\xe5\xba\x9c\x30\x10\x24\x05\x49\xa8\xaa\xe0\x8b\x5c\x3f\x73\x3f\x98\x99\x73\xa2\x1a\x78\xbe\x01\xee\xbb\x82\x9b\xa1\x2f\x12\x3c\xf8\x24\x90\xc0\xed\x0e\x36\x69\xc4\x4f\xaa\x43\x90\x07\xf7\x5a\x26\xb5\x67\x25\x0b\xaf\xe4\x16\x15\x3d\xe1\x77\xbc\x9d\xbd\x3f\x90\x68\xeb\x0f\xb8\x55\x29\xd8\xc1\x6d\x9d\x88\xb2\x75\xb0\xbf\xbf\x7b\xb0\x80\x8f\x6b\x66\xf2\xa9\x84\x50\x66\x13\xeb\xcd\xdf\x21\x47\xbb\x36\xeb\x34\x9d\x22\xae\x54\x34\x9e\xad\xf7\x1d\x8c\x7d\xf3\xaa\xc5\x2f\x3a\xe5\xfb\x1d\x8c\xb5\xba\x03\xcf\xd6\x75\x9b\xf3\x17\xff\x8b\x23\xa7\x75\x25\x11\x6f\x75\x70\x0b\x39\xb8\xe1\xb4\x5e\x08\xf4\x59\x66\x49\x52\xe0\xf3\xca\x16\xcd\xd0\x4f\xd5\xdd\x27\x78\x01\x3f\x97\x91\x73\x3f\xe3\x8b\x15\xd9\x8d\xec\x23\x50\x5c\xf3\xde\x92\x17\xf6\x4d\x9e\xeb\xb3\x63\x12\x45\x94\x61\xb5\x45\x2a\x09\xe0\xd6\xed\x22\xe8\xab\x40\x6a\x01\xa0\x66\xa5\x12\x6a\xa5\x9c\xa1\x6b\x09\x42\xaa\x0e\x47\x4a\x6b\x52\xba\xb6\x66\xbf\xb4\xe7\xb8\x4e\x4b\xfa\xce\xc5\x3f\x74\x26\xb4\x7f\x5c\x3a\x86\xe3\xa9\x62\xc8\xe2\x88\x48\xa3\xd8\x88\xcf\xa3\x0d\x02\x08\x89\x73\x61\xfa\xca\xa5\xb8\xe5\x5c\x3a\x36\xde\x70\x13\x96\x1d\x3f\x3f\xa5\x43\xf5\x61\x11\x37\xd0\x53\x26\x76\x83\xfc\x54\x58\x44\x90\x70\x83\x9e\x73\x3e\x62\x8d\xd7\x49\xcc\xdd\x67\x2c\x8c\xfb\xcc\x8d\x39\x3b\x1d\x34\xa8\x6c\x5c\x25\x31\x77\x5a\x46\xfd\x70\xe0\xa3\x1e\x1f\xdf\x59\x02\x75\x70\xcb\x69\x0c\x68\x34\xd6\xf9\xde\x1a\x72\xc4\x1a\x83\x78\x3c\x8e\x67\x26\x5b\
xd5\x95\x44\x9f\x04\x8a\xb1\x82\x9a\xd1\xbb\xc4\x77\xba\x35\xc5\x46\x29\x33\xba\x41\x11\xcc\xd0\xb9\x84\x67\x52\x47\xdf\x2c\x38\xa1\x84\x91\x58\x5f\xec\x92\x12\x61\x35\x31\xd3\x42\x9d\x0f\x94\x37\x22\x2e\xe3\x06\x5d\xd1\x02\x9d\x36\x8f\xc7\x8d\x69\x9c\x24\x51\x10\x8d\x23\x19\xb1\xc4\x69\x99\x46\xaf\x6f\xdf\x96\x53\x6e\xff\x86\x7a\xe0\x63\x9d\xc8\x3e\x1b\xf8\x88\xe8\xf2\xef\x44\x1c\x8c\xd9\xc4\x54\xa2\x9a\xac\x37\x59\xd7\x61\x6d\x39\xbe\x6a\xa6\xd6\xe0\xfc\xe5\xb2\xc3\xe8\x86\x71\x83\x41\xc3\x39\xb8\x85\x5e\x08\x34\x43\x67\x12\xf6\x40\x77\x5d\xf6\x3a\x54\xa2\xeb\xb5\xdc\x70\xb0\xd8\x5e\xb4\x20\xf4\x25\xf4\xf5\x21\x6c\xf8\xa6\x0f\x7b\xbf\x94\xe4\x07\xbc\x5f\x2b\xd2\x9e\x23\x81\x1f\x3c\x37\x46\xe4\x0f\x49\x5e\x4a\x34\x43\xef\x25\xec\xc0\xee\x0e\xc6\xf0\x56\x92\x5b\xf4\x5a\x09\xbb\x1f\x12\xbe\xa9\xff\x30\x3c\x91\x24\x82\xe7\xeb\x17\x6a\x7d\xac\x1a\xa2\xb5\xca\xb8\xa9\xeb\x9d\x24\xc7\xf0\x42\x92\xb7\xf0\x69\xf5\xda\x95\x67\xaa\x85\xaf\x6b\xab\xca\x2f\x71\xe8\xe9\xc4\xe0\xf0\xa7\x24\x2f\x81\x33\x72\x03\x82\xad\x76\x54\x77\x73\x83\x7d\x86\x38\x83\xdd\x1d\xe0\xc6\x8f\x15\x80\x34\xf3\x2b\xeb\x4a\xe9\xd2\x45\x9e\x5d\x8f\x95\xab\xd9\x47\x2d\x32\xcc\xaa\xa2\x8d\x74\xf9\xab\x7a\x5e\x4a\x24\x1e\xec\xee\x14\x72\xa6\xf0\x7b\x97\x04\xb8\xb4\xcb\xc9\x0c\x09\xa6\x25\x5d\x8e\x99\x2d\x63\xd6\x09\x09\x59\x8e\x79\x77\xe7\x0f\xe1\x32\x60\xe4\x45\x36\x62\xbb\x3b\x20\xdb\x1d\x8c\x81\x13\xde\x53\x3c\xe8\x0e\xb1\x2f\xdc\x21\x28\xec\x52\x61\x57\xa5\xbb\xc5\xd9\xf0\xd7\x12\x3e\x29\xa8\x01\x6e\x49\x98\xa1\xaf\x3a\x74\xfe\x8f\x1f\x12\x6b\xc8\x32\x63\x9f\x0d\x99\x71\x81\xfe\x8c\x21\x65\xe4\x0c\x28\xdb\xb0\x13\x93\x2d\xda\xe2\xd0\x2b\xdb\xcd\x18\x6c\x75\xe0\x7e\xe8\x33\x60\xbe\x7c\xb0\xbb\x33\xf7\x60\xe0\xa7\x79\x6a\x60\x92\x69\x56\x77\xe8\x89\x6e\x93\x76\xc0\x74\x95\x6a\x29\x88\x68\xab\x46\x12\x09\xcc\x8c\x16\x05\x86\x21\x25\xa9\x3e\xb1\xbc\xb2\xc3\xbc\x43\x5e\x9c\x22\xfd\xf7\xdd\x1d\x60\x44\xe3\x95\xc0\xdb\xd2\xda\xdf\x4b\x10\x65\xaa\xa6\xb6\x54\x35\x70\x38\x81\x32\xdc\xe3\xad\x5c\x60\x88\xd9\x0a\x1e\xdd\xe2\xee\xf6\x02\x22\x46\x06\x1c\x06\x8c\x04\x1c\x02\xb6\x59\x81\x4b\xaa\xdf\x57\x2c\x5c\x39\x67\xd7\x02\xb4\x3a\xb5\xc0\xac\x9d\x42\x5e\xe5\x97\x9b\x2d\x16\x30\x66\x24\x95\xd0\x67\x64\xf3\x39\xff\x7b\xba\xef\x53\x08\x94\xb0\xa0\xd2\x67\x10\x24\xbe\x84\x60\xe6\x73\x08\xdf\xfb\xba\x2f\x6f\x18\x11\x1c\xb6\xd9\xe6\x79\x39\x61\xe4\x4f\x18\xb1\xb5\x72\xe0\xb0\xd3\x13\xfe\x1d\x9a\x28\xd6\xdb\x36\xa7\x14\x15\xd7\x0c\x19\x49\x39\x4c\x37\x96\x73\x1c\x53\xd0\x83\xec\x8e\x96\x19\x5b\x73\x7a\xca\xd3\x61\x32\x15\xad\x06\x18\xd9\xdb\xd5\x67\x15\xf6\xf6\x09\x91\xbd\x8e\xef\x41\x4a\x58\x37\x2d\x23\x9b\x5a\xad\x32\xfc\xb6\x96\xfa\x5d\xc7\x44\x66\xbb\x54\xc5\x61\xae\x63\xd9\x15\xa4\xe3\xfd\x21\x5a\xd4\xda\x8d\x49\x09\x61\xbd\x63\xe9\xdf\x4a\x64\x6a\x6a\x0b\x5f\xe0\x05\xdc\xad\x9b\x12\x0a\xb7\xaa\x64\x3e\x9f\xa1\x1b\x06\xce\xff\xe5\x40\x8a\xad\x3a\x0c\x45\x33\x34\x64\xe0\xf8\xea\x9b\xa6\x26\x5b\xc6\xa9\x5e\xc6\xab\xc0\x21\xa1\xfa\xd8\xe7\x8c\xa1\x19\x1a\x31\x08\x5b\x1d\x9d\x08\x28\x93\x36\xb4\xb4\x1c\x8f\x65\x97\x92\x22\x37\xd6\xad\x44\x03\xd4\x67\x5a\x95\x9d\x32\x13\x4c\x4c\xf3\xc3\x2d\x8b\x25\xa0\x14\x8e\x65\xf1\x15\xc3\x2d\x5b\xb9\x18\x99\xd6\xb1\x7a\x83\xd2\xac\x41\x0f\x1c\x30\x0e\xa8\x54\x35\x28\x25\x69\x99\xaa\x2b\x41\x77\x86\x96\x91\x62\x54\x86\x35\x46\x4d\x59\x6a\x13\x94\x81\x29\x4c\x79\x9e\x0c\x38\x66\x2b\xec\x05\x43\x89\xac\x53\xc2\x32\x4a\x7a\x0e\x18\xcd\x8f\x99\x0c\xa6\xac\xa4\xe4\x16\xdd\xaa\x2a\x6e\xa5\xe9\x4f\xd6\xea\x80\xc4\x8a\x1e\x4d\x0d\xd3\xfe\xda\x2a\xa8\xe9\x19\x45\xca\xf5\x4a\x09\x44\xa5\x95\x7a\x36\x23\x44\x66\x84\xfc
\x5b\x9e\x5f\x57\x59\x4c\xf7\x92\xc8\x92\x90\x3b\x74\x6c\x13\x22\x5b\x1d\x9d\x67\x4d\x93\x21\xed\x9d\x8e\x0c\xf0\x38\xbb\xee\xee\x88\x11\xc9\xe1\x8c\xad\x56\xa7\x8f\x18\xe8\x0b\xbc\xfc\x07\x0f\x1c\xe0\xb8\x37\x43\xd7\x6a\x92\xe9\x2a\x1e\x02\xc7\xd8\x2f\x61\x92\x0a\x50\xc7\x00\x3d\xd2\x40\x4a\xdb\xbf\x5a\x9e\x91\xdd\x2e\xf6\x16\xf0\x94\x91\x63\x0e\x7f\x92\xa7\x4c\xcd\xc5\x57\x6b\x79\x85\x95\xa9\x67\x98\x66\x62\x66\x0e\x9f\xd0\xfc\xbd\x61\xee\x98\x50\xf3\x3e\x36\x83\x15\x6b\xd8\xd8\x82\x35\x10\x56\x3a\x96\x14\x66\x3a\xe0\x6c\xa6\xe3\xb9\xf4\x2d\x0d\x2e\x85\x7d\xcf\x3b\x94\x3d\x6d\x24\x29\x32\x3e\x4a\xa5\xa0\xfa\xb7\xe8\x15\x33\x3e\x93\x56\x47\xab\xa0\x65\xcf\x2e\xa3\x12\x6b\xbe\x8a\xfa\xeb\xea\x8d\x0c\xa7\x6c\xfd\x46\x7b\x51\xbf\x67\x98\xe8\x7b\x85\x89\xec\x3b\x64\xee\xd0\x29\x83\xb5\x86\x7c\x28\xc1\xdc\x2e\x56\xb8\x56\x30\x7c\x60\xe4\x8c\xc3\x17\xb6\x2e\x64\x64\x86\x3e\xb0\x55\xfb\x6b\x4f\x19\x12\x3a\xe6\xc6\xa0\x39\xaf\x90\x2f\x75\xe8\xd9\x6a\x14\xe2\x77\x50\xcf\xb2\x4b\x7f\xf1\x42\xcd\x60\x53\xc5\x09\x23\xcf\x38\x7c\x5c\xbb\x26\x9c\x72\xa4\x31\x9e\x30\xd3\x46\x6c\xee\x2f\xd8\xe0\x67\xfa\xb2\xb2\x7a\x6f\x01\x48\x90\x19\xfa\xce\xe0\x63\x86\x0a\x74\xbf\x9e\x33\x14\x4a\x0c\x4f\x19\x3a\xc1\x90\xd9\xa4\x72\x43\x05\x9a\xb9\x17\x18\x9e\xf1\xf5\xbe\x95\x2f\x2c\x5b\xbd\xba\x6f\xb8\x7b\x4e\x93\x6b\x72\x1f\xf8\x7f\x42\xe8\xa7\x1c\xfa\xbe\xe4\xc0\xfc\x67\x1c\x06\xc5\xa1\xe4\x32\xa2\x88\x59\xca\x03\xfd\xd3\xdf\xea\x40\x10\xf9\x8e\x03\xc1\x55\x25\x8d\xea\xb3\x0a\x5c\xdf\xbf\x89\x21\xf8\x52\x81\xf8\x6c\x41\xa0\x19\x1a\xc6\x5a\x78\xea\x39\x3d\x8a\xa1\x03\x1c\xfb\x1c\xb7\x9c\x07\x74\x1a\x3d\xb8\xd9\xb1\xee\xfd\xfb\xc9\x36\xe6\x77\x7d\x5d\xfd\xbc\x5f\xfb\xfc\x8d\xad\xf2\x71\x95\x39\xf7\xaa\x9f\x77\x6b\x9f\xdf\xb3\x8d\xc9\x63\x7f\x54\x3f\xef\xd5\x3e\xbf\xdd\x5c\xf7\x93\xcd\x75\x3f\xdf\x8c\xfc\xdd\xe6\x76\xbf\xd8\x4c\xf9\xa7\xcd\x9d\xfa\x75\x33\xe5\x7f\x6e\x2e\xcd\xd3\x8d\x94\x8b\x74\x23\xe5\x32\xdd\xd8\x2d\xac\xfa\xb9\xf3\xb8\xf6\x3d\xdd\x5c\x9c\xa6\xf6\x46\xb5\xd6\xf0\x4d\xeb\x60\xb5\xd3\x27\x56\x16\xc2\x0c\x3d\x89\x33\x9f\xa3\xf1\xc8\x58\x57\x75\xa7\x9b\x6f\x15\xbe\x43\xaf\x35\x0a\xb3\x79\x5b\xde\x94\x9c\x56\x32\x89\xbe\x8c\xeb\x9b\x82\x43\xb4\xad\xa5\x82\xc4\xb9\xc3\x6c\xc6\xac\x0d\x61\xe1\x6e\xf7\x68\x8a\xfe\xf1\x8a\x9b\x1b\x23\x22\x2e\xd9\x90\x09\xbf\xe1\xfc\xa3\xc5\x5b\xff\x70\xfe\x81\x7d\x11\xe9\xcd\xfe\x62\xef\x2f\x45\x59\x1a\x6f\xd6\x37\x7e\x11\x89\x5b\xc8\x69\xf4\xa3\x61\x24\x13\xd0\xf9\xf1\x87\xb1\x34\x9f\x74\xdd\x4a\xf4\xc0\x9f\x31\xfa\x14\x23\xae\xb7\x03\xcb\xf4\x47\xa9\x6d\x79\x58\x17\x05\xa7\x88\x97\xa7\x5c\xa9\x3e\xf6\x10\x40\x48\xb6\xb4\xaa\xd9\x5d\xd1\x4b\xd9\x16\x16\xea\x47\x90\x02\xd7\xbb\xf9\xc1\x31\x70\x97\x7e\xd0\x7b\x16\x7a\x23\x91\xea\x9b\x9c\xcc\x6d\x8d\x61\x71\xfa\x0c\x49\x68\x77\x94\xf4\xa8\x0c\x11\xc5\xd8\xd7\x5d\x1e\x82\x07\xf7\xf4\x83\xf6\x42\x94\x79\x09\x02\x65\x5e\x1c\xfb\x4c\x8d\xb8\x4b\x17\xf6\x88\x04\x36\x63\xdc\x44\x68\x86\xc6\x91\x49\xef\xfa\x48\xf3\x87\xdd\xfe\xc4\x86\x55\x1d\x9b\x0f\x43\x9f\xde\xf9\xa6\x07\x2b\xdb\x63\x63\xbb\xbf\xd0\x0c\x6d\x47\xb0\x07\x1c\xcf\xe7\x5b\xfa\xb9\xe3\x79\xc0\xb5\x0b\x4f\x7d\xd0\x3f\xec\x4b\xee\xad\xc2\x08\xf1\x36\xe9\xe0\x07\x7b\x73\x0f\xb7\x11\x7f\xd0\xf1\xbc\xb9\x87\x5b\x88\x3f\xd8\xd3\x4f\x56\xda\xa2\x5f\xf2\xe5\x50\x35\xef\x6d\xac\x43\x4f\xcb\x74\x38\x69\x05\xe6\x36\xaa\x0c\x48\x60\xdf\x6a\x3d\x49\xab\xfa\x5c\x04\x77\x51\x65\x5f\x6f\x94\xfe\xe6\xa6\xe0\x1d\x3a\x8d\x74\xa6\x7d\x73\x17\xc0\x02\x43\x10\x57\x30\x0d\x6d\x4c\xbc\xe7\x48\x91\x32\xc7\x77\x06\x74\x9c\x30\x6b\xa5\x98\x56\xc0\xdc\xed\xf9\xdc\x71\xb6\xb2\x2b\xa5\x95\xa2\x58\x9e\x8
e\xb7\x01\x7f\xaa\xb1\x3e\x8b\xe1\x75\x04\x47\x12\xf1\x96\x43\x9c\x0a\xb3\xdf\xa5\x6b\xb7\x07\x9d\xef\x4e\x76\x25\x6b\x75\x9f\xf0\xb6\x42\x08\x29\x59\xc9\x08\x2b\x5c\x9d\x18\x84\x57\x62\x47\xfe\x05\xb3\x62\x3e\xf7\x0e\xc9\x1d\x7a\x17\xaf\xcc\xf9\x2f\x6d\xda\x17\xa0\x2f\x39\xf8\x2f\x4f\xa4\x6e\x11\x72\x55\x76\xc3\x71\x95\x43\x7e\xa8\x4e\x70\xec\xfb\x77\xaf\xd3\xcd\xb9\xd9\xd3\x8d\xcb\xcc\x99\x3d\x31\xb8\xbb\xdd\xbb\x4e\xfd\xa3\x14\x9b\xab\x19\xca\xb3\x7e\x9b\xeb\x78\xba\xb9\x8e\x57\xd5\x26\xbc\x50\x4d\x78\x17\x59\xe8\x4f\xab\x00\xf9\x40\x1b\x3c\x76\xc6\x75\x1b\x6e\x90\xa2\xd3\xb4\x2a\x22\x3e\xd8\xf3\x75\x1a\xa1\x67\xe8\x62\x86\xa2\x08\x44\x84\xae\x52\xe4\x61\x0c\xdf\x53\x74\x9c\xea\x4d\x0a\xd0\x5f\xf2\xcf\x4f\xf5\x4b\x78\x95\xa2\xe5\xc1\x76\xfe\xfa\xcb\x8c\xf2\x3a\x80\x2d\x6f\x91\xe3\xfb\x0d\x54\xe6\x90\x60\x71\xbd\x2f\xbe\xb4\x5b\xf0\xa5\x36\xfb\x32\x77\x44\x75\x34\xce\xab\x40\x3a\x1e\x87\x67\x81\xec\x65\xa2\xf8\x1a\x50\xf9\xe5\x63\x6a\x6f\x20\x05\x5f\xca\xb8\xad\xf0\xb8\x85\x66\xa8\x3f\x00\xc7\x81\x19\xfa\x19\x01\x93\x70\x9e\x2a\x29\x3e\x00\x3d\xbd\x4f\x52\x78\xa7\xea\x3a\xc5\x18\xde\x47\x6a\x71\xd3\xcb\x64\x4b\xa8\x7f\xca\x1a\xde\x54\x6b\xb8\xd5\x3e\x9d\x73\xbd\x75\x73\xa6\xd3\x74\x7d\xd0\x93\xf7\xa5\x4e\x19\xfe\x1d\xf8\xf2\x66\x4e\x32\x28\x08\xa1\x38\x8b\xe6\xdb\x2e\x6f\x2e\xe3\x76\x62\x70\xad\xfe\x6b\xf2\x3e\xa6\xe6\xdf\x5b\x09\xdf\x22\xe4\x0c\xa2\xb1\x64\x42\x09\xa2\x6c\x43\x27\x24\x33\xf4\x39\x5a\xad\xa4\xcc\xd2\x5c\x7a\xe2\x05\x9c\x23\x0e\xcf\xd0\xc5\x29\x72\x92\x68\xcc\x78\xc8\xfa\x8e\x32\xdd\x87\xa9\xa1\x6a\x4b\x7b\x11\x30\x86\x53\xe4\x44\x7c\x14\x05\x91\x5c\x86\x60\x39\x04\x0d\xa5\x4e\x97\x64\x7f\xf6\x20\xcd\x3f\x0b\x16\xb2\xe8\x86\x09\x07\xa6\x86\x73\x4e\x91\x33\x14\x71\x3a\x75\x20\x54\xcc\x51\x2c\xf9\xde\xe1\x27\x81\x42\xdc\x73\x7a\xf9\xde\x44\x53\x81\xf8\x8e\x25\xbd\x9f\xa5\x99\xea\xd3\x36\xc7\xd2\xb6\xe7\xf3\xec\xa9\x6f\x3d\xb3\xb2\x2f\x79\xb7\x04\x66\x06\xc8\x3c\xd0\x72\x00\xfb\x7a\xfc\x18\xe4\xdb\xbe\x52\xa7\x7c\x97\x3a\xe5\xbb\x74\x59\xe9\xf5\x39\x8b\x94\x0d\xa2\x45\x6d\x08\xfa\xa7\x39\x36\xac\x63\x2d\x74\x12\x16\x86\xf3\xf7\x79\x62\xd0\x22\x0d\xb5\xae\x28\xca\x2a\x8a\x74\x45\x91\x1b\x82\x24\x88\x92\xc8\xed\x63\x55\x9e\x50\x97\x41\x48\xa2\x5a\xa5\x1e\x50\x37\x00\x9a\x57\xba\x44\x43\xec\x06\x10\xbb\x21\xc4\x6e\x1f\x62\x45\x83\xc4\x39\xa4\x22\x43\x68\x32\x4a\x23\xeb\xbf\xd2\x87\xfd\xbc\x0f\xfb\xcb\x7d\xa8\xba\x80\xe9\x06\xa6\xfa\x96\x5b\x4a\x98\x1b\x42\x48\x98\x22\x8b\xb0\xa5\x9e\x8c\xf4\x02\x37\x20\x2b\x7a\x53\xea\x85\xab\xec\xcd\xfc\xe0\xbe\xe9\x4d\xbb\x20\x23\x48\x5f\x61\x85\x75\xb5\x48\xcd\x1c\x86\x75\xe5\x5c\x57\xce\x75\xe5\x7c\xa9\x47\xf3\x20\x94\x8c\x16\x24\xd4\x80\xe3\x95\x83\xd9\x81\x08\x06\xc0\x96\x68\xb1\xfa\xf4\xe7\x92\x1a\xf3\x6d\x00\x49\x55\x41\x79\x5d\x85\x49\x82\x62\xf3\xbb\x34\x44\x6b\xea\x52\x00\xf7\x61\xa8\xec\xe9\xf0\x8d\xbf\xd5\x59\x54\x60\x5f\x56\x61\x8f\xb5\x02\x30\x51\x0d\xd1\xff\x2a\x8d\xa2\x3f\x80\xb3\x00\xbe\xa5\xc8\xb9\x68\x5f\xfc\xf5\xd7\xe5\xfd\x02\xe1\x3f\x5a\x3d\x17\xfe\xfa\xeb\xaf\xbf\xfe\xc7\xf6\xfc\xdf\xfe\xfa\x2b\xb9\x74\x30\x86\x3b\x74\x15\x68\x87\xe6\x59\xbc\xfa\x90\x4b\xf8\x72\xa1\x14\x1f\x25\xdf\xb1\x71\x38\xd4\x6d\x9c\xf7\x75\xa1\xed\xdc\xe7\xd3\x18\x74\x4c\xd8\x77\xa6\xe4\x17\xc7\xb8\xe5\x2c\x9c\xca\xa4\xfe\xb1\xd9\x1a\x7b\x5b\x5b\x9c\xeb\xab\xf3\x93\xda\xf7\x47\x75\x1b\xb9\xfa\xbd\x6e\x0b\xbe\xab\x7e\xae\x97\x7e\xf1\xfb\x0a\xea\x1d\xaf\x2a\xa8\xf7\xd5\x01\xfb\x64\x63\x7a\x91\x22\x23\x80\x39\x9d\x30\x07\x4e\x02\xa4\x46\xcd\x48\x47\x93\x2e\x29\x7b\xf7\x25\x13\xc3\xc9\x07\x36\x64\xb7\x0e\xbc\x55\x4b\x45\x30\xcc\xdf\x1e\xff\x48\xe9\x58\x
75\x6f\x7f\x00\xe7\x81\x59\xd5\xde\x0e\x14\x0d\x03\x5c\x5b\x7b\xbf\xa6\xab\xc3\xcc\xfe\xb4\x56\xb3\x19\xfa\xa8\x18\xa1\xb3\xb7\xe7\xe1\xd6\xc3\xce\xe3\xbd\x83\x47\x4a\x42\x89\x43\xaf\x27\xda\x9d\xbd\x03\xef\xf1\x81\x2f\xf0\x03\xfd\xf4\x70\xee\xa9\x59\x6a\x5e\x3f\xfc\x43\xaa\xe9\xc7\xda\x88\xe9\xaf\xda\xe2\x60\x0f\x76\x0f\xf6\x77\x8c\x1d\x62\x5e\x3f\x3e\x98\x7b\x18\xab\xd7\xf3\x3c\xea\xf5\x9e\x7e\xf2\x11\x27\xac\x8d\x76\x0f\xf6\xff\x48\x5b\x28\xcd\x2c\x97\x34\xb3\x5c\x30\x6e\x23\xd4\xd9\xdf\xfd\x03\x09\x82\xf6\xff\xe0\xad\x1d\xfc\xa0\xb3\xbf\xab\x6a\xd8\xc1\x0f\xf6\xd5\xbf\x1d\xa0\xb1\xcf\x88\x68\x21\x71\xd8\xf1\x7a\xbb\x7e\xfb\x31\x86\xa0\xe3\xa7\xad\x3d\xcf\xfb\x43\xb6\xd0\xce\x21\xeb\x79\x7e\xc7\xb6\x62\x38\xad\xcc\x9f\x27\x01\xec\xc1\x0c\xbd\x0b\xe0\x45\xa0\xb9\x14\x39\x6d\xc5\xc0\x4f\x02\xd8\x81\x55\x9b\x60\xf5\x2d\xb0\x4e\x6d\x0b\x6c\xa7\xb6\x05\xb6\x5b\xcb\x78\xb0\x57\xcb\x8f\xb0\x5f\x3b\xe6\x7d\x50\xbd\xb1\xb0\xf1\xb0\x76\x5b\xe0\xa3\xda\x01\xfb\xc7\xb5\xeb\xad\x3a\x5e\xfd\xba\xaa\x4e\xa7\xbe\x0b\xd7\xd9\x59\x2c\xd0\x0c\xbd\xcc\x1b\x5d\x6d\xf5\x0c\x7d\xb6\x7a\xe3\xdc\x7a\xff\xd3\x7a\xef\x5b\xef\xbf\xad\x79\xff\xdc\x7a\xef\x66\xef\x77\x61\x86\x5e\x17\x15\xeb\xff\xb5\x9c\x6f\x96\x58\x10\xb4\xa2\x65\x0d\x4d\x6e\x50\x23\xee\x83\x01\x56\xcb\x50\xb9\x01\x71\x1f\x1e\xfb\x5a\xee\x9d\xfa\x5b\xa2\xd9\xd4\xf7\x91\xcf\xe7\xb2\xa7\x9f\x77\xfc\x5d\xbf\xa3\x7d\x8d\x6e\xf0\xc5\xf6\x22\xd9\x2c\xc0\x0f\xbd\x4c\x1b\xa5\x09\x5a\x39\xd1\xeb\x41\x3a\x5c\x5b\x58\xc2\x8a\xb9\x69\x9c\x22\xf1\x80\xcd\xbd\x1e\x6f\xa1\x2b\x69\x9e\x71\x0b\xc9\x96\xd3\x70\x30\xf6\x95\x84\xdd\x8e\x8c\x63\x7e\xa1\x27\xb0\x03\x3c\x52\x3a\x3e\x84\x09\xd6\x27\x28\x4a\x17\x96\x45\xdb\x56\xe1\x0d\x68\x36\xb7\x6c\x77\x40\xee\x1c\x30\xfe\x00\xcb\xbf\x65\x37\x4c\x4d\x2b\xe3\x12\x50\xda\xef\x20\x01\x0e\x7b\xb8\x9d\x3d\x75\x3c\x0f\xb7\xf2\xb7\x9e\x67\x53\x40\xe9\xff\x1f\xd9\xbd\x74\xca\xd9\xcd\xd3\xf1\x04\x1d\xc5\x89\xf7\xd5\x66\x79\xb5\x66\x75\x6a\xcd\xda\xa9\x35\x6b\xb7\xd6\xac\xbd\x5a\xb3\xf6\x6b\xcd\x3a\xa8\x35\xeb\x61\xad\x59\x8f\xea\xad\x7a\x5c\xbf\x5e\xae\xe3\x2d\x35\xb3\x63\x3b\x11\xad\x59\x84\x98\x52\x0a\xf4\xb0\x6b\x99\xad\x6f\x58\xe2\x6a\x46\xe9\xd7\x4a\x67\xd3\x02\x5b\xbf\x97\x05\xb8\x52\x95\x3a\x7b\x07\x1d\xfd\x9a\x55\x60\x0b\xe5\x5d\xc9\x59\xd1\xea\x78\xde\x1f\xbc\xb5\xf7\x87\x6c\x31\x97\xb6\x10\x73\x83\x5e\xc7\xb7\x3d\x4e\x91\x4d\x8d\x20\x8a\x38\x22\x08\xc7\x70\x4f\x13\x9f\xb7\x53\x8a\x84\x16\xdc\xc2\x0e\x44\x1e\x27\x4a\x2f\xec\x68\xf5\x90\x26\x16\xb6\x81\xcd\xe6\x8f\x0e\xf6\x98\x66\xf4\x87\x9d\xc7\x9d\x03\xfb\x6e\x97\x80\x56\x96\xf0\xa3\x04\x3a\xbb\x46\xee\x58\xde\x3a\x5a\x8f\xb5\xee\x6d\x4b\xe4\x9c\x8d\xe2\x74\xdc\xd7\xb7\xee\x06\xac\xc1\x26\x53\x79\xe7\x60\x7f\x86\x4e\x13\xb8\x91\xc8\xf9\x2c\x62\x3e\x6c\xbc\x3a\x3b\x7d\x74\xe0\x75\x1a\x83\x58\x4c\xa8\x74\x30\x4c\x6a\x76\xfb\xd8\x26\xe0\x03\x9a\xc6\x70\xff\x41\x09\x9a\x2d\x0f\xc3\x59\xfe\x70\x9e\x3f\xbc\x54\x0f\xef\x95\x25\xf5\x9d\x81\xa0\x5a\x3c\xda\x5e\x82\xbe\x8d\x8d\x6a\x71\xc5\x31\xde\x40\xee\xb4\xea\x78\xba\xa9\xc8\xd5\xf0\x58\x5b\x3f\xa7\x46\xa6\x7d\xa9\x46\x1e\x66\xb3\x43\xd6\x27\xff\x29\xda\xea\xc0\x96\x57\xcf\xb6\x69\x5e\x77\xea\x59\x79\x4e\xd1\x96\xa7\xa1\x6b\x7c\x6a\xde\xab\x45\x19\x61\x9d\xe6\xb0\x10\xe6\xc1\x40\xf5\x41\xea\x06\x18\x82\xa1\xcf\x21\x98\xf8\x42\xc9\x6f\x66\x7b\x21\xab\xcb\xc3\x55\x37\x1f\x3c\x6d\xa7\x04\x91\xee\x90\x77\x63\xa6\x48\x09\xe3\xc9\x74\xcc\x24\x6b\xd0\x7e\x3f\xe2\x43\x1d\xd1\xa6\x0f\x23\x29\xf3\xd9\x17\x6e\xd0\x9b\x4a\xd3\xdf\x37\xfa\x2a\x4f\x5f\x95\x7d\x6b\x00\x92\x06\x15\x4c\x27\x9e\x88\x04\xeb\xdb\x9e\xa9\x49\x95\xaf\xde\x27\xb0\xa3\
xd8\xea\x16\xbd\x4e\xa0\xd3\x81\x8e\xfa\xa1\xd9\x6d\x27\x5f\xe5\x2c\xbf\xe7\x32\x4f\x76\xea\x3c\x39\xa4\x1b\x37\x58\xa6\xd5\x0e\x08\x4d\x02\x96\x8f\x66\x20\x4f\xb5\x1f\x82\x7e\xd6\x76\x50\xa2\xed\xa0\x9b\x42\xe1\x3a\xf3\x67\x68\x30\x86\xbe\xd2\x55\xe9\x19\x06\xfa\xb1\x78\x21\x31\xdc\xf8\x21\x24\xe6\x45\x38\x06\x8a\x81\x7e\x36\xbf\xde\x24\x6a\x46\x7f\xc1\x90\x62\x08\x42\x35\x22\xa7\xe6\x43\xa2\x03\x9b\x5e\x2b\x13\xc5\xf2\x9f\xda\x2d\x54\xc6\x23\xe2\x44\x8f\x19\x76\xb7\x7b\xd1\xd8\x57\xad\xab\xb8\x7d\xee\xe8\xc6\x1d\xa1\xdb\xcd\xbd\x71\x6c\x57\x37\x1e\xa0\xc9\xb8\x3a\x07\xaf\x37\x17\x3f\xa2\x9b\xdd\x88\x9b\x3f\x5f\x55\x14\x89\x6a\xbe\x1a\xcb\x8d\x58\x15\x7e\x5a\x67\x3f\x31\x2a\xfb\x0c\xbd\x19\xeb\x98\x85\xd2\x64\xfb\xeb\xaf\x9e\x71\xed\xb8\x74\x1f\x63\xec\x06\xbd\x53\x9d\x59\x5d\xb8\x81\x71\x89\xe5\xf1\x9b\xc2\x0d\x74\x8c\x03\xf6\x4f\x91\xf3\xc0\x81\xe3\xe2\x38\x8f\xd9\x3b\x1e\xc3\xc7\x31\x7c\x40\x1c\xee\xe9\xbe\x7f\x2c\x81\x4a\x5f\xe1\x09\xdf\x2b\xc6\x5f\x60\x3d\x1e\xd7\xb1\x7a\x67\xb9\x34\x2d\x52\x35\xd9\xdd\xa5\x44\x64\xf9\x92\x67\xee\xc0\xa7\xba\x7c\x75\xf1\xbb\xdf\xf6\x1f\x56\x3e\xec\xf8\x15\x33\xab\xfc\xb0\xeb\x57\xcc\xb3\xf2\x83\xe7\x57\xb6\x00\xcb\x0f\xc5\x82\xfa\x79\x5c\x5b\x51\x7f\x8e\xeb\x72\xe6\xd9\xd8\x1a\xaa\xd3\xea\x48\x76\xea\x43\xf9\xbd\xfa\xfd\x61\xed\xf3\x87\xea\xe7\x83\xda\xe7\x2f\x56\xbf\xbd\x91\xd6\x5e\x5c\x27\xbb\xd5\x83\xfb\xca\x8a\xd1\x9a\x23\x76\x95\xbd\xf6\x23\x82\xaf\x63\x3d\x13\xaa\x57\xe6\x9e\xd7\xcc\x93\x31\x3c\x43\x17\x3f\xc6\xc8\xa1\x63\x26\x64\x43\xff\x6d\xcf\xa8\xe0\x11\x1f\x3a\xf8\x12\xab\xcf\xef\xc6\x48\x13\x50\xb5\xfb\x4e\x6c\x4c\x7f\x32\xc4\xd5\xd2\x22\xfa\x3a\xaa\x51\x6b\xcd\x14\x4e\x7d\x2f\x53\x8f\x17\x15\x29\xf5\xb1\x4a\x44\xda\x87\xfc\xda\x85\x80\x55\xa7\xd7\x9b\x2a\xe4\xfb\x31\x38\x23\xc1\x06\x0e\x3c\xf8\x1f\x57\xf4\x86\x9a\xeb\x4f\xfc\x07\x91\x2b\x59\x22\x11\xe2\x84\xe3\xe2\xb0\xe3\x83\xbf\x92\x07\x43\x70\x1c\x8c\x75\xce\x5d\x0b\xeb\xb3\x2a\xd6\xef\xac\xbe\x03\x6a\x79\x4d\xdf\x99\x5e\xac\xfb\x4e\x07\x7d\xd5\x33\x9a\x3c\xe8\xf7\x91\xf3\x3d\x18\x53\x7e\x6d\xf5\x18\xd7\xbd\x05\x1f\xd5\x84\x4a\xfa\x60\x72\x99\x9d\x54\xba\xe1\x73\x45\xd2\x2a\x95\x89\xdb\x11\x3c\x93\x3e\x9c\x80\xf6\xc5\x6f\xf7\xf3\x51\x92\xec\x56\xb6\x79\x3c\x13\x74\x6a\xd5\x25\x5a\x8e\xaf\x7e\x62\x98\xa1\x9b\x02\x76\xd6\xee\x78\x9e\x81\x5a\x1e\xbd\x9f\xd5\x2e\x18\x14\xa5\x02\xc9\x1b\x81\xe4\xed\x38\x95\xe3\x88\xb3\x76\xc4\x07\x71\x23\x88\x45\x9f\x89\xb6\xe7\x60\xd0\x4d\x36\x35\xcf\x90\x2c\x8a\x0d\x68\x63\x40\xdb\xba\x44\x38\xa2\x42\x36\x26\xa2\xbd\xa3\x2b\x3f\xc1\xf0\x6e\x8c\x9c\xb3\x38\x15\x21\x73\xaa\x54\xbc\xde\xdc\x03\x25\x77\x2a\x8a\xb4\x77\x57\xe3\x6d\x4c\x82\x0c\xb9\xa6\xe2\xf9\xb8\x4e\x7c\x32\xc9\x49\x16\xd1\x70\x24\xdb\x5e\x43\x77\xdc\x24\x95\x6a\xa9\x85\x19\xa2\x7d\x70\xd2\x84\x89\x76\xc2\xc6\x2c\x94\x0e\x38\x11\x8f\x64\x44\xc7\xc5\xd7\xf6\x24\xfe\xd9\xfe\x05\xc8\x8c\x05\xd7\x91\xfc\x05\x54\x46\x48\x18\x8f\x63\xe1\x80\xf3\x6f\x61\x18\x56\x86\xee\x1f\xa4\xd8\x37\x37\x23\xc8\xfb\xab\x9a\x33\x6c\x0f\x68\x9f\xf5\x2b\x63\x93\xb0\x30\xe6\x7d\x2a\xee\x1c\x0c\x1f\x29\x3a\xa1\xe8\x54\x47\x36\x61\x0c\x71\x1f\x39\xcf\xb5\x23\xbf\x11\xdc\x35\xe4\x28\x4a\x1a\x63\x1a\xb0\xb1\x55\xb5\xd3\xd2\xa3\x51\x19\x90\x6f\xb6\xbe\xfa\x6f\x0f\x32\x37\x7e\xf2\x80\xb3\x59\xcf\xec\x0b\x10\xa7\xf5\x3a\x42\xef\x6b\xaa\xe8\x4b\x6b\x20\xcf\x53\xc4\xdd\xe0\xbd\x1b\xbc\xad\x84\x0c\xfc\x4d\x26\xfb\x46\xd1\xd2\xcc\x2c\xf4\x9a\x97\x66\xcf\xd6\xcb\xf6\x1d\x06\x52\xd5\x38\xc2\xda\x35\xb5\x9a\x33\x03\x36\x1e\xb7\x93\x31\x4d\x46\xed\x78\x99\x37\x4d\x33\x75\x77\xf8\xe6\x74\x0b\x6c\x22\xb7\x4f\xf9\x50\x75\x6c\x85\x60\xbb
\xbb\x9c\x96\xf8\x0d\x4a\xd6\xd1\xd1\xd7\x84\x58\xbd\xfb\xbe\x3a\x59\x67\xfd\xda\x29\xd6\x1f\xd5\xef\xcf\x2d\xa1\x1e\x0d\xb9\x62\xcd\x41\x3b\x64\x5c\x31\x43\x51\xa5\xe1\x81\xdb\xbe\xea\xb9\xd3\x1a\x17\xbc\x5d\x21\x9f\x47\xaa\xcd\x0e\xe8\xfb\x9f\x38\x9c\xf6\x6d\xf8\x27\x9b\x95\xa0\xe7\x74\xa3\x23\xf6\xdd\x6a\xc3\x81\xdb\xbb\x6c\x99\xea\xbe\x25\xe7\x73\x7d\x16\x54\x2b\xf0\x3b\x26\x15\xbb\xbe\x33\xa8\xd0\xe5\x2b\x4e\x56\xbb\x1d\xab\x18\xaa\xe2\x05\xe2\x6e\x30\xc8\x9c\x41\xb2\xea\x05\xca\x39\x2e\x98\xc0\x3a\x2f\x90\x76\xa9\x2e\x6a\x27\x93\xed\xea\x4f\x11\xd7\x09\x2b\x4a\xbf\x69\xb5\x93\x5f\xf7\x95\xe8\x98\xa6\x32\xeb\xe3\x4f\x34\xef\xeb\x6f\xfd\x0a\x33\xfc\xb9\x3c\x38\xd7\xec\xae\x1f\xcf\x78\x31\x3a\x2f\x2b\xa3\xc3\xc3\x55\x05\xd2\xe9\x1a\x70\x11\xae\xd1\x10\xc2\x78\xdc\x08\xe3\x71\x9b\xa6\x32\x2e\x85\xef\x6f\xca\x68\xbe\x71\xee\x1b\xf9\x35\x43\xe7\x7d\xd8\xd2\x21\x28\x05\x83\xea\x5d\xd7\xb5\x82\xb1\x3a\x21\x6d\x2c\x9d\x0a\x16\xe7\xff\xf9\xbf\x73\x59\x57\x61\x74\x19\x6e\xd4\xca\x58\xb8\x31\x5a\x2c\x5d\xee\xd8\x49\x9c\x26\x4c\xcf\xb4\x65\x5d\x86\xae\x01\x1f\x33\x7a\xc3\x96\xc1\xc3\x70\xa3\x4a\x18\x87\x1b\xcd\x9a\x28\xdc\x38\xe7\x06\xe1\xc6\x19\x1b\x2c\x93\x1a\x8c\xd3\x15\x6d\x4a\xfe\x37\x32\xcb\xa7\x25\x66\xe1\x7f\x93\x51\x3e\xfd\x36\xa3\x8c\x37\x77\x67\x7f\x33\xa3\xdc\x6c\x1e\xab\xed\x70\x63\x98\xc8\x64\x33\x23\x8c\x36\x23\x1f\x86\x1b\x4d\xcc\x69\xb8\xd9\x6e\x99\x85\xff\x6b\xbd\xb5\x35\x1b\xeb\xa1\x45\xca\x5d\x58\xf7\x6d\xec\xd4\x5d\x1b\xb7\xeb\xd8\xb1\x4f\x25\xab\xe8\x80\x35\x3d\xbd\xb4\xf7\xc3\x62\x5d\xd0\x87\xe1\xfa\x03\x48\x23\xe4\x69\x7b\x39\xc6\xab\x54\xd3\x90\x8e\x99\x52\xc2\xbe\x37\x26\x31\x97\xa3\x1c\x35\x12\x44\x82\x0d\x27\xe2\x99\x81\x68\x8f\x74\x3e\x9f\x55\xb3\x62\x2a\xd8\x4d\x5b\x03\x35\xfa\xed\xc1\x98\xdd\x66\xcb\xb6\x61\xd8\xa7\x37\x45\x91\xd3\x9b\x62\x89\x17\x22\x9e\x39\x6b\xb5\x0d\xca\x87\x63\xd6\x1e\xb3\x81\x54\xbf\x76\x6f\x1b\x61\x2a\x92\x58\xb4\xa7\x71\x64\x10\x6b\xed\xe3\x32\x17\xb1\x25\x29\x86\x54\xd5\x65\x75\x52\x8a\xae\xbb\x52\xb6\x8d\xd9\xd9\xca\x42\x83\x5e\xdd\xc0\x49\xa6\xcc\xfc\xc6\xde\x96\xf3\x9a\xf2\x54\x69\xaf\x55\x2e\x72\x9e\xb3\x40\x58\xef\x73\x6e\x72\xde\x52\x11\x8e\x9c\x2a\x4b\x39\x47\x53\x11\x8d\x9d\x2a\x5f\x39\x6f\x69\x5e\x78\xbf\xa8\x2b\xe5\xcc\xa9\x1a\xf9\xce\xeb\x74\x9c\xc3\x3d\x2c\xf0\xa5\xc3\x34\x91\x4e\xd5\x7f\xee\x9c\xb1\xa9\x64\x93\x80\x09\xa7\xea\x48\x77\x4e\x43\x19\x97\xaf\x0b\x7f\xba\x73\x12\xdf\x64\xf0\x55\x8e\x76\x9e\xb1\xd0\x7c\xb0\xb6\xc8\x04\x5e\xee\x7f\xae\x99\x75\x1d\x2b\x5c\xfd\xa7\x59\x41\x1b\x44\xbf\xe6\x85\x4b\x33\xa0\x67\x37\x3a\x4f\x45\x65\x8a\x5c\x57\x27\x59\x2c\xc0\x99\xd0\x5b\x73\xa4\xcc\x81\x7a\xfc\xe9\x51\x58\xf1\x61\xbd\xee\xe9\x46\x9e\xd4\x39\x7f\x12\xf7\xe9\xb8\xa1\x4c\x9c\x46\x32\x52\xad\xc8\xac\xa7\x7e\x94\x4c\xc7\xf4\xce\x51\x0b\x50\x1c\x5e\xaf\x9a\x34\xba\x68\xbb\x1f\xd1\x71\x3c\x6c\xd8\x3f\xb2\x1e\x2b\xa7\xfb\x72\xa9\xd0\xa4\xd4\x5a\x0f\x50\x9f\xab\xe5\xd2\x12\x8e\xe3\x84\x35\x26\xf9\x12\xa7\x46\x65\x18\xa2\xed\x1b\x7b\x35\xb9\x75\x56\x4f\x2d\x85\xd9\x64\x37\xce\xf1\x7e\x1a\xc3\x34\x04\x1d\x67\x72\x53\x01\xd6\x56\x7c\x43\xd1\x49\x23\x5e\x52\xa2\xe5\x54\x05\x70\x2a\xdb\x7b\x0d\x25\x64\xae\xd2\x44\x46\x83\xbb\xbc\x6d\xb5\x79\x3b\x43\x27\x6a\x4c\x3d\x55\x58\x3f\x75\xf2\xe1\x5e\x43\xe7\x20\x8e\xe5\xea\x1e\x98\x8c\xdb\x3b\x8d\xfa\x0a\x9b\xa4\x61\xc8\x92\x44\x2d\xeb\x1b\x3a\xe6\x29\xe5\xa1\x31\x46\xab\xeb\x75\x05\xe5\x54\x44\x93\xc2\xb8\x1d\x86\x68\x52\x41\x71\xc6\x64\xe3\x19\x95\xec\xc1\x79\x34\x61\xd6\xa2\xbd\xbe\xc3\x69\x78\xdd\x17\xf1\xd4\xe6\xb2\x9c\xe3\xfd\x1c\xdc\x70\x5d\x38\x8e\xa6\x0e\x38\x82\x85\x12\x79\xfa\x1a\x07\x0
f\x17\x2c\x39\x8d\x93\x48\x5f\xf1\x08\xce\x20\xba\xdd\xc0\x5d\xba\xa2\xdc\xbe\xfb\x05\x3d\x25\x29\x96\x77\x78\xf3\xca\x7e\xb5\x59\x87\x7b\xba\x59\xab\x78\xb5\x59\x2f\x38\x5d\x9e\xe4\x22\x9e\x25\xcb\xf3\xfb\xfb\x66\x3c\x1f\x42\x3b\x0a\xe4\xa9\xd4\x27\x6f\x9e\xa1\x0b\xcb\x5a\x76\x40\x6f\x33\x38\xac\x1f\x49\x47\x0d\x9d\x82\xfc\x5b\xbe\x02\x61\xb1\xc5\xb1\xc2\xa2\xfa\xb1\x8c\xcf\x0c\xde\xbb\xc1\xbb\x9e\xf0\xff\x26\xd6\x19\x8a\xb6\x15\x69\x57\x66\x3b\xc3\xaa\xe3\x03\x33\x77\x07\xd4\xbc\x59\x5f\xac\xb6\xea\x90\x02\x51\x86\x14\x08\xdb\x79\xf8\x54\xc2\x96\x32\x3a\x9b\x4d\xd1\x73\x88\xe3\xab\xe7\xf9\x5c\xf4\xca\x77\xff\xe1\xf8\xce\x96\xfe\x43\x74\x77\x99\xd8\x2b\x37\xf8\x52\xa9\xf0\x7c\x73\x85\x20\xc8\x4b\x86\xee\x68\xcd\x2f\x6b\xa2\x19\xf2\xba\xbc\xa5\xda\xab\x61\x0d\x56\x24\xe5\x0c\x7d\xbc\x81\x5b\x9d\x1a\x58\x37\xd5\x76\x06\x87\x99\xe7\xb4\x9a\x45\xa4\xf1\x7f\xce\xb4\x2a\xd3\x99\xb8\x41\x6c\xf6\xb4\xdc\xe0\x91\xde\xd5\xa2\x2e\xdd\xd5\xee\x06\xea\x86\x3b\x15\x15\xef\xff\xb3\xcb\xd4\x60\xbb\xfa\x5d\xe7\xc2\x77\x36\x59\x4a\x7a\x39\x33\xa2\x56\xac\xb4\x87\x7e\xb1\x84\xc9\xdf\x59\x3d\xd8\x7f\x41\x44\x5b\xdb\x07\x9b\xad\xad\x37\xab\x4c\x96\x42\xb9\xcc\xc2\x8a\x6b\xaa\xe5\x94\xf1\x7e\xc4\x87\x4b\xda\x1a\xbb\x9d\xea\xed\x59\xfb\xb8\x61\x55\x32\x0e\xfb\xcb\x5c\x52\x8e\xc3\x68\xbb\xfe\x4e\x75\xea\x09\x9d\x30\xbf\x61\x56\x3f\xbd\xbf\x10\x4c\x96\x3a\xe4\x57\x38\x8e\xfa\x7d\xc1\x92\xa4\x82\x86\xbe\x58\x32\x5c\x3f\x87\x15\x3f\xd7\xc4\xf8\xb9\xde\x9b\xdd\xdc\x1f\x5d\xdb\x67\x98\x35\x63\xb6\x6d\x9e\xf4\x32\x3d\x4e\x13\xc9\x44\xe3\x4c\xa7\x7f\x35\x35\x59\xfb\xf8\x3a\x15\x87\xbd\x53\xe2\x38\xd9\x1d\xfd\xa4\x22\xef\xca\x56\x0d\x62\x31\xc9\xec\xfe\x8a\xae\x5a\xb6\x31\x8c\xc7\xed\x64\x52\x71\x52\x9a\xfe\x72\x96\xba\x28\x03\xed\x78\x75\xc6\x56\x3d\x80\xaa\xb0\xff\xb9\x8a\x4d\xbb\x7f\xa7\xea\xdc\xeb\x8a\x38\x31\x01\x98\x01\xed\x0f\x99\x03\x5b\x5e\xa5\xc7\xd6\x47\x3e\x98\x02\xb9\xfa\xe4\xac\x0c\x83\xc8\x60\xf2\xed\xb9\xd5\xc1\x0f\x19\x50\xe6\xe9\xd0\x3e\xc7\x05\x52\x54\xfe\x18\x67\x7b\xbb\x79\xdc\x2c\x53\x06\xea\x78\x00\x27\x69\x16\x2b\x57\x34\xfc\x77\x8c\x37\xc1\x68\x7f\xc9\x74\x4b\x98\x54\xeb\xe9\xf2\x44\xea\x47\x09\x0d\xc6\x7a\x26\x21\x69\x0b\x81\x2a\x3b\xb1\x35\xec\xc4\xfe\x95\xec\xf4\x8e\x31\x51\x0e\x6a\xd4\x5f\x3d\xa8\xba\x8b\x9e\x85\xc0\x73\x7e\xaa\xee\x9f\x2d\x79\x1a\xb2\xe9\x93\xdc\x94\xd3\xc7\x9e\x36\xff\x75\xb2\x3f\x4e\x65\xf4\xdb\xf3\xe0\x2a\x51\x62\xe5\x73\xd1\xcd\x4a\x0e\xb8\xf4\x3d\xce\x02\xb9\xbf\xc1\xba\x29\xff\x89\x89\x44\xeb\x87\xdc\xc4\x1d\xa9\x15\xf9\x5f\xd4\x80\x27\x82\xf2\x70\xf4\x9b\x0d\x10\x2e\x7d\xbb\x6a\xa1\xf8\x4f\x56\x9d\x46\xe3\xbe\xb2\x16\x7e\xbf\xf6\x93\x7f\x71\xed\x1f\x13\x26\x7e\xbf\xf6\xd3\x7f\x5d\xed\x2f\xe2\x6c\x4c\x7f\xbf\xf6\x47\xff\xba\xda\x3f\xb0\x9b\xe8\x6f\x55\x1e\x3c\xfb\xd7\x55\xfe\x77\x1b\x1e\x7c\xb5\xdd\x1f\xda\x1a\xa7\xe7\x6e\x30\x05\x7b\x86\x57\x16\xc8\x98\x0f\xa2\x61\x8e\xfe\xb6\x20\x66\xda\xde\x2b\x37\x80\x69\x78\xad\x28\xe7\x7d\x07\x9c\x7f\x1b\x3c\x1c\x3c\x1c\x3c\x2e\x3e\x0e\x62\x2e\xdb\x03\x3a\x89\xc6\x4a\x79\x9c\xc4\x3c\x4e\xa6\x34\x64\x65\x03\xef\xca\xda\xb8\x45\xdc\xe5\xf2\x51\x89\xd7\xb6\xeb\xd2\x1c\x14\x22\xdc\x0d\x3f\xe6\x31\x36\xb4\x88\xb1\x29\xa2\x5c\x66\xe8\xcf\x31\xfc\x0c\xc1\xd8\x48\x1f\xea\x57\x82\xad\xdc\x2a\xfb\xd3\x28\xcf\xaf\x8c\xea\xfc\x54\x07\x84\x05\xaf\x96\x03\x79\x76\x7d\x93\xc7\x47\xd8\x69\x65\xa6\xdb\xc0\x40\x02\x85\xad\xad\xb4\xb8\x81\xbb\xba\xcc\xbd\x58\x0a\xb4\x39\xa7\xe6\x34\xf9\x02\x71\x37\xfc\x56\xbf\xb8\x7b\x86\x82\x1b\xd5\x80\x5d\xa0\x2e\xad\xdf\x0e\x28\x8d\x4d\x14\x7e\xc5\x2e\xfd\x6e\x4e\xad\x9e\x83\xbe\xb9\xea\xd6\x44\x58\xbf\x82\x35\x
8a\x7e\xa0\x46\x30\x1f\x84\x57\x7d\xeb\xfd\x8e\x83\xe1\xdb\x4d\x7e\x4e\xae\x1d\x50\xd1\x2e\x82\xfe\x4a\xd6\x7b\x9e\x9d\xa2\x33\x9c\xf1\x69\x6c\x4e\xc1\xdc\x51\x78\xc9\x34\x27\xf6\xe1\x3e\xfc\xac\x4f\xdf\x98\xe5\x10\xee\x90\xdc\x86\x9b\x6d\x9d\x22\x09\x6e\x11\xdb\x86\x64\x1b\xcc\xbd\xa5\x97\x18\xd7\x1c\xe9\x1a\xe3\x37\x06\xb7\x48\x6c\xc3\xb1\xd4\x99\x5d\xb8\x1b\x46\xea\xcf\x17\x5c\x8f\x94\xac\x40\xdf\x4a\xa4\x3a\x0a\xde\x8e\xc1\x71\x2a\xf0\x0f\xd7\x0f\xc1\xf8\xc6\xce\x6b\x51\x5d\xfb\x8e\x0b\xae\xe7\xf4\x26\x50\xcb\xb5\xfe\xa7\x2d\xe3\xe1\x70\xcc\xd4\xaa\xdf\x9e\xf4\xf3\x97\x63\xed\x7e\x2c\xa2\x19\x26\x41\x7b\xbf\x31\x95\xed\xdd\xc6\x34\x68\xef\xd6\x63\x26\x82\x58\xca\x78\xe2\x80\xd3\x99\xde\x36\x92\x78\x1c\xf5\x1b\x62\x18\x50\xe4\x41\xc3\xfc\xe7\x76\x76\xf6\x71\x39\x4c\xd7\x96\x30\xa8\xf9\xcb\x6c\xdb\x3f\x23\x25\x10\x94\xf7\xf3\xbd\xfb\x8a\x6a\x3d\x66\x42\x4e\x28\xa7\xc3\x72\x00\xa3\x7a\x69\x4e\x6f\x4a\x35\xe1\x74\x1b\x71\x0c\xdf\xb7\xf1\x2a\x65\xaf\xcc\x6b\xb6\xe3\x57\x47\x31\x53\x71\x6a\x7d\xbd\x24\xf6\x22\x3e\x8e\xb8\xe5\x6a\x5c\x6e\xd1\x9a\x6d\xb2\x5a\x54\x02\x67\xb3\x8a\x56\xcd\x66\x0d\x3b\xf2\x41\xe9\x37\x46\xcd\xa9\x68\x3a\x2f\x6b\x7e\xa4\xba\xb9\xf5\xbe\xf6\xbd\xbe\x89\xfa\xa3\xf6\xbd\xbe\x7f\xf5\xd6\xe6\xa6\xaf\x1c\x45\xa2\xea\x5a\x78\x12\x56\x12\xea\x54\x26\x6b\xf6\x74\xbe\x0d\x3a\xf6\x2d\x7c\x8e\x5b\xce\x38\x0a\x1e\x04\x71\x2c\x13\x29\xe8\xb4\xbd\xe7\x7a\xae\xd7\xa6\xe3\xe9\x88\xba\x07\xed\x7e\x94\xc8\x07\x61\x92\x94\x00\xee\x24\xe2\x6e\xa8\x14\xee\x97\xa1\x1a\xcc\xf3\x6d\xe0\x06\x87\x96\xcc\x74\xc6\x92\x78\xc2\xda\x7b\xee\x43\xd7\xd3\x25\xed\xd7\x65\xe1\x1f\xb5\xc2\x6c\x3c\x69\xf7\xa9\x64\xd3\x28\xbc\x66\x42\x17\xac\xbe\x32\xc5\xde\x87\x75\x1d\xd8\xa8\xbb\xdf\xd5\x5a\xf4\x18\x84\x1b\xde\xa8\x3f\x63\xdc\x2d\xb2\xbc\xde\x8b\xe2\x49\x2e\x65\x7e\xa5\x96\xf4\x2d\x5e\x06\xab\x5e\x86\xd6\xcb\xa5\xbe\xfd\x16\x22\xe1\x86\x1f\xeb\x2b\x66\x36\xa5\xd4\x54\x2d\x64\xe3\xeb\x50\xbb\x0b\x4a\x11\xb5\xe3\xeb\xb4\xfa\x0d\x51\xb0\xb5\xf9\x2d\x17\x95\xcf\xbc\xf8\x1c\x0d\xd0\x8e\x76\x1a\x29\x92\xf2\xb2\xb5\x32\x65\x3a\xa5\x6d\x93\x51\x5c\x49\xfa\x25\x04\x41\x81\x40\x1f\x2d\x5d\x8f\xb5\x32\xfd\x16\x75\xe4\xa1\x95\xb9\xa4\x78\x19\xe8\xab\x3b\x8a\x65\xd7\xa4\x4c\x7f\x1e\xc2\xbb\x10\x5e\x84\xf0\x29\x84\xaf\x21\xfc\x19\x02\x8f\x41\xc4\x20\x63\xf2\x99\x23\xe7\x9c\x26\xd7\x0e\x06\x16\xaf\x4b\x8e\x24\x63\x54\xe4\x47\xca\xd2\x28\xd5\xef\x02\xd2\x6c\x7d\xa2\x97\xac\xf0\x14\x2a\x37\x94\xe9\x6b\x3a\xd0\x17\xa1\x6f\x1b\x2b\x4e\x0a\x09\x74\x4f\xad\x3b\x6d\x98\x75\x97\x15\x25\x0c\x3e\x8a\x55\x17\x07\xc5\xd3\x44\x52\xc9\x1c\x90\x18\xfe\xe3\xa3\x70\x39\xbd\x89\x86\x54\xc6\xc2\x4d\x13\x26\x8e\x86\x8c\xcb\xf2\x12\x9b\x73\x11\xf5\xb5\x33\xaa\xd9\x5c\x89\x6d\x44\x93\x51\x1e\x2e\x24\xf1\xea\xe3\x4e\x5d\xe1\x86\x52\x8c\xff\x64\x77\xf3\xb9\x70\x27\x4c\xd2\xec\x31\x19\x45\x03\xa9\x9f\x3b\x87\x6a\x7d\x4e\xa5\x8c\xf9\x7c\xce\x5d\x49\xc5\x90\x49\x7d\x44\x38\x9e\xf1\x71\x4c\xfb\xf3\x39\x12\xee\x54\xe8\x6b\x76\x9f\x19\x5e\x40\x58\x2b\x27\x23\xc1\x06\x20\x88\xea\x1a\xe0\xe4\x8c\x21\xa9\x8f\x98\xa0\x14\xf1\x66\x53\xb8\xc1\xcc\xb0\x8b\xbe\xbc\x32\x08\xcc\x8f\x40\xff\x48\x5c\x6a\x7e\x26\x2e\xed\x15\xee\x6c\x3f\xf3\x9f\xcb\x85\x39\x33\x01\xe1\x91\xbf\x32\x2e\x4d\x5f\x9a\xc9\x41\x57\x2c\x15\xdc\x43\xdf\x5c\x8a\xb9\xef\xeb\xab\x34\x43\x4f\xfd\xeb\x29\x14\x69\x9c\x25\x31\xa7\xfa\x61\x77\x01\x61\x4c\xc6\x1c\xe2\x98\x50\x0e\x91\x7e\xe9\x2d\x60\xa0\x1f\xda\x3b\x0b\x08\x62\x32\x88\x21\x89\x49\x10\xc3\x78\x1d\x4b\xdd\xd3\x67\x7e\x12\x03\x3d\xd6\x11\xd9\x2f\xfd\x28\x06\xfa\x4a\xfd\x0d\x63\x5f\x00\xfd\xee\xbf\xd1\xa9\xbd\xe8\x63\x3f\xcb\xa8\x45\
xbf\xf9\x8e\x03\xa1\xf4\x4f\x80\xee\xe8\x23\xc1\xc7\x3e\x87\xf0\x85\xc2\x12\x5c\xfb\x27\x10\x8c\x15\xaa\xe0\xa9\x56\x55\xf4\xc7\xe0\xce\xbf\xcf\x8a\xe9\x9f\x1a\xea\xa7\xfa\xf3\x54\xe7\xe8\x7a\xa9\x0b\xe8\xb0\x7d\x08\xce\xfd\xec\xea\xf9\xac\x3d\xdb\x31\xb9\xa7\x67\xfe\x33\xa6\xa6\x1e\xd0\x8f\xf9\xd3\x8d\x7f\x4f\x23\x4d\xf4\x95\xfe\x1b\xeb\xbf\x3f\xf4\xdf\x3b\xfd\xf7\xa7\xce\xb7\x97\x14\x45\x3f\xe7\x4f\x41\xa8\x2b\x3c\xcd\x7f\x9b\x9a\x27\x31\xb9\x0f\x3e\xf8\x51\xbc\x80\x51\xfc\x1b\xc9\x39\x3d\x68\x67\x49\x36\x87\x31\x61\x1c\xa6\x8a\xd0\xa1\x6a\xd0\x77\x8d\xfe\x56\xfd\xfd\xa0\xfe\x9c\xa9\x3f\xe7\xea\xcf\x4b\x4d\xd2\x4c\xb7\xed\x60\x01\x77\xfa\x61\x67\x01\xb7\xf9\x90\x1e\xc7\xeb\xf3\xe6\x1f\xd8\x79\xf3\xaf\x73\x6e\x38\xca\x91\x9d\xc5\x1b\xee\xc5\x43\x1c\x49\x2d\x31\xae\x62\xb2\x32\xed\xdc\xf2\x6d\x98\x1c\xdf\x4b\x71\x77\xff\x51\x14\x97\x7f\x12\x51\xde\xb9\x63\xdf\x09\x2a\x98\x9a\x5d\x48\x9f\xa0\xc1\x78\x01\x4f\x63\xf2\x9a\xc3\xab\x98\xfc\xe4\x70\x1a\x93\x57\xb1\xe2\xa0\xef\x31\x39\x17\xf0\x61\x3d\x91\xf7\xf4\x95\x2f\x21\x18\xa9\x36\xde\x9a\x46\x7e\x59\x3b\x0e\xc2\x0d\x7a\x3a\x01\x5d\x68\x92\xe2\xe9\x44\x35\xe7\xeb\xc1\x4d\xee\x3a\x5e\xe4\xad\xfb\x12\xc3\x89\x19\xbc\xd7\x9c\xc4\x2b\xd3\xbe\x42\x08\x31\x44\x25\x75\xaf\x7d\xa9\x78\x28\x06\xea\xf9\x29\xd0\x03\x5f\x64\xc4\xfe\xf4\x99\x62\x26\x0a\xc1\x7b\x3f\x82\xe0\x93\xaf\x33\x4f\x9f\xaf\xbf\x25\xf0\x3e\xe8\xab\x96\xbe\x55\x18\xde\x29\xd5\x06\xc3\x49\xbc\x3a\x01\xee\x43\x08\x74\x02\xdc\x8f\x31\x19\x30\x14\x61\x88\x36\x25\xef\xfb\x18\xc3\x0c\xc5\x26\xfb\x97\x49\x8b\xf8\x26\x26\x31\x87\x67\xf1\xe6\xfb\x1f\x62\x4e\x66\xe8\x4d\xbc\xe1\x78\xb1\x93\xf2\xa9\x88\x43\x96\x24\xac\xef\xe4\xab\x6b\xc0\x50\xe6\x73\xcc\x9d\xe4\xd6\x97\xec\xf4\x95\x93\xa4\xd3\xa9\x58\x2a\xb7\xb3\xa4\xc4\x3e\x8b\x91\xf3\x91\x5f\xf3\x78\xc6\x1b\xf2\x6e\xca\xfc\x86\xd3\xe2\x78\xa1\x66\x8b\xee\xcc\x3b\x14\x41\x99\x04\xe4\xc9\x9d\x03\x27\x31\x52\xdf\xf4\x87\x3c\x7d\xc8\xd2\x7b\xb3\x4a\xc5\x1c\x02\x86\xce\x85\xce\x05\xf2\x39\x36\xc8\xcc\x89\xf7\x59\xac\x3e\xad\xe8\x9e\x60\xa2\x87\x06\xc3\xcf\xf8\x37\xef\xb2\x78\xbd\x81\xb9\x6b\xa9\xc6\x75\x9a\xf1\xb8\x7a\xc5\x1f\x2f\x52\xeb\xae\x4f\x3e\x46\xb3\x3b\xd7\xb2\x63\x1c\xd8\xce\xfd\x96\x1d\xca\xc9\x2e\x11\xd5\x31\xc9\x5a\x19\x00\x4e\x52\xa5\x1e\x62\xab\xac\x36\x31\xf2\xdc\xf9\x96\xa7\xf4\x67\x0c\x72\x3e\x67\x26\x4c\xba\xf2\x4d\xe7\x57\xca\xbf\x29\xa5\x47\x4d\x76\x78\x19\x93\x6f\x31\xbc\xff\xdd\x1e\xfa\x11\x6f\x4a\xc7\x6e\x12\x37\x0d\xd5\x74\x92\x26\x65\x93\x9e\x1b\x6f\xf3\xa5\xe0\x49\xbc\x3e\x69\xe4\xfb\x18\xde\xc6\x70\x8b\x7e\xc4\x56\xfe\x29\xad\x9c\x69\xb1\xf7\x3c\x26\xe8\x79\xa8\x45\xa6\xb7\x32\x7b\x9e\x49\x61\xa7\xcc\x5c\xd5\xf4\x5e\x9e\x09\xcf\x53\xb2\xa5\x96\x6d\xea\xb9\x41\xf9\x2e\x26\x3f\x05\xbc\x88\x7f\x91\xc6\xdb\x0c\xa9\xce\x6f\xa5\xbe\xa6\x4a\x11\xd5\x29\xf3\x28\xb4\x3b\x45\x82\x5b\x5d\xdf\x11\x4a\x75\xca\x86\x43\x2f\x4b\x63\xc5\x20\xf4\x53\x9d\xc6\x2a\xd5\x69\xac\x84\xea\x13\x09\xd4\x4f\x5d\xba\xc0\xdd\x94\x20\x46\x34\xa2\x1d\xdc\x43\x59\xaa\xef\x56\x07\x24\x91\xad\x0e\x74\xb0\x9f\xbd\xa3\x26\xf7\x77\xab\x83\x21\xd5\xa3\xf6\x53\xac\x5a\x04\x56\xf4\x4a\x82\x5e\xc4\x46\x9b\xb5\xb3\x7a\xa9\x59\xf9\x49\xb7\xfe\xeb\x7f\x37\x0b\x07\xda\x05\x13\x56\x98\xd0\x04\xdb\xa7\x60\x52\x64\x2b\x10\x4d\x1e\xd5\xe9\x66\x98\x6e\xe0\x9f\xf1\xea\x3c\xbe\x5f\x63\xb8\xd1\xb7\x4e\x00\x8f\xc8\x13\x10\x11\x09\x53\x90\xd1\x2a\x60\x4f\x1f\xdf\x34\x41\x4a\xcd\xe6\xd6\x83\x8b\xbf\x92\xdb\x20\xbe\x7c\x60\x0e\x0e\x71\x7d\x55\x1f\x69\x71\x4c\x08\xd7\x79\xa1\x4c\x76\x5f\x16\x91\x55\x09\x12\x1f\x1f\x9a\x24\x85\xab\xd2\x1b\x52\xd9\x98\xc4\x89\x6c\x3c\xde\x98\xdd\x30
\xdb\x8d\x96\x11\x72\x3c\x57\x49\xc6\x75\xc9\x15\x07\xe3\x98\xca\x5a\x6a\x45\x1e\xa1\x0e\xdb\xfd\x43\xe8\x93\xfd\x76\x96\x44\x48\x23\x92\x4a\xa0\xd1\x2f\x52\x9f\x37\xd2\x08\xf1\xd6\x81\xf7\x87\xf8\xe3\xc0\xfb\xa3\xc3\x76\xd5\x33\x92\x6d\x8a\xf5\x0f\x85\x9c\xb5\xf4\x25\x66\x61\x65\x81\xa2\xe5\x4d\x50\x21\xe1\x10\x13\xb1\x9e\x2f\xc2\xbf\x27\xda\x4c\x42\x39\xb5\x78\x0b\x37\x5c\x96\x6b\xe9\x2a\xb9\x96\x1a\xb9\x66\xf2\x3d\x6a\x37\x5f\x5d\xba\xa5\x30\x43\x14\x8c\xfc\x33\xf7\x5c\x43\x1c\xad\xbf\xd5\x26\x8c\x34\x3f\x19\x4d\x22\x8a\x48\x1c\xc1\x60\x33\xf8\xf3\x02\x3c\x88\x96\x32\x16\x67\x02\x79\x7d\x1f\xb1\xac\x8f\xd6\xf6\x0f\x5f\x39\x5b\xa4\x16\xff\x7a\xe2\x62\x48\x22\x12\x44\x30\x8e\x7e\xf3\x26\xa7\x7e\x44\x3e\x0b\xb8\x51\x73\x04\xb6\x23\xf2\x1e\x26\x11\xb9\x86\xcf\x62\x99\xcf\x8b\xe5\xc4\x78\x78\x8d\x33\xd9\xe4\x85\x2f\xae\x31\x48\x52\x44\xad\xcb\xdb\x44\xd9\xb4\xdd\xce\xfe\xee\x01\x3b\xf8\x03\xb1\x76\xe7\xf1\x43\x4f\xd9\x65\xd9\x49\x7a\x94\x1e\xee\xce\xe7\x5b\xe3\x14\x31\xdc\xa3\xed\x8e\x4f\x71\x0b\xf5\xd5\xaf\x76\x3f\x45\x1a\xb8\x0c\x39\x51\xdc\xde\x12\x2d\x89\x17\x99\x02\x93\xd6\x33\x29\xec\x76\x0e\x69\x4f\xd3\xe1\x8b\x5c\x7f\xb1\x2e\x08\x78\x7c\x48\xe7\xf3\x9d\xc7\x84\x10\xda\x6c\x66\x95\xe6\xd0\x3b\x07\x0f\x1f\xed\xb1\xfd\xba\x6f\xb5\x82\x71\xdf\x7b\xfc\xf0\xa0\x80\x29\xf3\x31\x78\x16\xcc\xc3\x87\x0f\x0f\xd8\x41\xdd\x79\x5e\x41\xd3\xf1\x76\x0f\x1e\x15\x30\x07\x2b\xd1\x74\x76\xbd\xbd\x83\x92\x9e\x87\xab\x11\xed\x1f\xec\x5a\x44\x3f\x5a\x0d\xf4\x68\xb7\x73\xf0\xa8\x00\x7a\xbc\xb2\xba\x1d\xef\xf1\xe3\xfd\x9d\x02\xa8\x4c\x05\x51\x41\xb5\xb3\xbb\xff\xe8\xa1\x05\xd5\x59\x8d\xeb\x60\xe7\x60\xbf\xec\xa6\xce\xce\x6a\x5c\x8f\x1e\xed\x9b\xce\xac\x29\x8b\xb6\xc0\xd3\x21\xaf\x5a\xe0\x5d\x49\x94\x9a\x44\x79\x4a\x6d\x44\x83\xc8\xfa\x23\x22\xd4\x47\xdf\xf3\xf4\x81\x71\x8a\xf6\x30\x04\x29\x72\xda\x0e\xb6\x5e\xee\xd8\x2f\xf5\x6f\x8c\x61\xb4\x61\xaa\xec\x54\x2e\x3d\x8b\x7e\xff\x82\xaf\x5c\xa7\x10\xd9\xcd\xae\x3a\xdd\x9b\x9a\x33\xc8\x03\xe9\xd2\x5c\x57\xdb\xb2\xae\x22\x60\x44\x5f\x4f\x84\x18\xa1\xd8\x2d\xae\x58\x60\xd9\x1d\x1f\x33\x34\x8a\x40\xe8\xfb\x5d\x41\x9a\xab\x3d\xa6\x11\xb9\x49\x61\xb6\xc1\x64\xe0\x7f\x20\x25\xd4\x5b\xda\x56\xb8\x5b\xb1\x76\xd5\x72\x3c\x56\xba\xd4\xfe\x63\x52\x32\x52\x93\x43\x46\x75\xe1\xb9\xb3\xdc\xad\xfe\xaa\xbe\xf6\xad\xbe\x86\x4a\x3d\x06\x67\x2a\x0d\x9c\xab\x10\xb2\x08\xab\x77\x9e\xd9\x6e\xcb\xa1\x93\x68\x4d\xce\x72\x55\xee\x9b\x21\xa4\xce\x0c\xb3\xe8\x57\xe5\x3b\xa6\x7c\xcb\x94\x5f\x09\xd3\xce\x60\x14\xbf\x5c\xfe\xa2\x6d\xe5\x77\x11\xa1\x01\xa2\x11\x70\xf0\xf2\xff\x63\xa5\xa1\x9a\xf3\xbf\x9f\x05\x86\xdb\x68\x83\x95\xac\x94\xe4\x8a\x7e\x7c\x1c\xfd\xad\x2b\xbe\x8a\x1b\xe6\xac\x1b\xb4\x74\x8c\x8f\x39\xb1\xac\x59\x29\x94\xfa\xb2\x04\xfb\xa6\xac\x7c\x06\xd6\x41\x8f\xa3\x3a\xa8\x76\x4f\xac\x98\x33\x84\x23\x45\x7d\x07\x42\xff\x04\xfa\x7e\x07\x02\xdf\x53\x8d\x50\x16\x82\x95\x67\x45\x29\x34\xdb\x12\x69\xcc\x6a\x89\x3f\xc1\x3a\x73\x88\x5a\xc1\x16\x18\x8e\x56\x21\x9e\xa1\xeb\x48\x2f\xab\x4b\x38\xbe\x33\xd8\x4e\x41\x2b\x3e\x06\x09\xd5\x57\xd7\x2f\x9b\xbb\x66\x11\xd2\x09\x53\x6c\x24\xca\x2a\x2d\xf3\x25\xcb\xcc\x2a\xf5\x03\x66\x50\x69\xe3\xf4\x2c\x5a\x63\x02\x14\x52\xa2\xdd\xa9\x5f\x33\x05\xcc\xdc\x18\x73\x15\xad\xbf\xf2\x44\x67\x3a\x4c\xdd\xed\xf9\x5c\x19\x0d\xc5\x1b\xa6\xde\x30\x97\x66\x19\x12\xb3\x64\x92\x26\x95\xa4\x9d\x5a\xb0\x44\x96\xdd\x8f\xc2\x74\x22\x49\xb6\x94\x81\x90\x69\x15\xba\xc8\x40\x98\xa7\x78\xa4\x6e\x1f\xa8\x95\x7b\x50\x61\x0b\x21\xcd\x52\x1f\xc6\x44\xd9\x2a\x11\x49\x75\x8e\xca\x54\xdf\xab\x99\xba\xac\x5b\xa7\xb2\x46\x52\x0c\x51\x91\x62\x51\x53\x47\x71\x4e\x60\x58\x23\x4b\x7d\xc
[elided: several thousand hex-escaped binary bytes (a machine-generated raw payload from a vendored generated file, likely compressed protobuf descriptor data); not human-readable and reproduced here only as part of the patch body]
x93\xa5\x9e\x54\x2e\x28\x64\xfd\x96\xf4\x61\x66\x0d\x65\x43\x17\xc4\x6b\x09\xce\x30\x92\xdf\x64\x2f\xec\xf2\xd1\x62\x37\x0b\xed\xb4\xb8\xd6\x1b\xd1\x3b\x59\x18\x2f\x44\xb4\x65\x52\x8a\x21\xda\x79\x7f\x35\xec\xb5\xfa\xc4\x96\x81\x0e\xd4\x96\x2b\x20\xf3\xfa\x45\x9c\xb9\xba\x41\xd1\x33\x74\x81\x91\xb1\x26\x36\xb4\x80\xaa\xc2\x05\x65\x55\xed\x22\x3b\xd5\x2e\x71\xbf\x5e\x4b\x20\xe8\x77\x4b\x19\xe8\xfe\x24\x8b\x14\x88\xa7\x3e\xbc\xe6\x4d\x5d\x86\x28\xad\x05\xb5\xac\x71\x1f\x63\xf2\x99\x91\xf7\x3e\xaa\xc7\x96\x5c\xc4\x3d\x16\x5c\xf0\x1e\xfe\x3b\xb8\x8e\xe2\x38\x9d\x99\x1f\x66\xa6\x06\x0d\x20\x9e\x94\xe9\xf5\x52\x6c\x8f\xb6\x70\x27\x86\x05\x5f\x74\xbc\xee\x2b\x1a\x1b\x16\x14\xa6\xfd\x15\x15\x52\x14\x12\x9d\xa4\xbe\xa6\x3d\xef\xf0\x1c\x2b\x11\x97\x31\xac\x06\x0a\xf8\x86\x6a\xf1\x56\xb5\xd9\x77\x1a\xa2\x65\xbb\xe7\x7a\xd4\xc4\xf6\x2f\x41\xc1\x50\x69\x28\xe8\xff\x9f\x4c\x07\x0f\xd1\x07\x01\x2d\xbc\xb6\xe7\x2f\x3a\x29\xe2\xa4\x6f\x3c\x9d\x2e\x35\xf9\xf1\x2c\x0a\x17\xfd\xbb\xa8\x90\x6a\x7c\xa8\x48\x67\x6d\x41\xc3\x4a\x3a\xdc\x44\x19\x11\xfb\x2e\x45\xef\xcf\xc8\x65\xbf\x8d\x37\x5d\x32\xdd\x0d\x46\xc3\xdb\x63\x84\x44\x29\xeb\x5d\xb7\xf3\x93\xdd\xe1\xfb\x87\x65\x7e\x6e\x13\xbe\x7f\xa6\x99\x29\x7a\x0b\x53\x58\x05\x90\x41\x23\x66\xbf\xf4\x85\x3c\x52\x70\xfe\x3f\x4e\x2e\x0e\x39\x99\xf4\xff\xbb\x68\xc5\xca\xe0\xcb\x84\x42\x56\x9b\x78\xd1\x07\xeb\xcd\xae\x05\xc8\x3f\x23\x66\xd4\xf7\x34\x55\xef\x31\x89\x4d\x37\x01\x16\x2e\xa3\x65\xb3\x62\x0a\x96\x1a\xd9\x59\xd5\xa8\x55\x90\xc1\xf2\xc3\x9f\xaa\x76\xad\xab\x0c\x7a\xde\xdc\xbc\x97\x9d\x56\x6d\x05\xab\xad\x1b\x8e\xeb\x13\x92\x1c\x6f\x2b\xbc\x64\x76\xa6\x6c\x2d\x14\xcc\x16\x2c\x7b\xdd\x1c\xa7\xf0\xd9\x37\xe6\x12\x0a\x52\x97\xbe\x00\xa1\x88\x20\xd7\x44\xb0\xdf\x07\x09\xf7\x86\x25\x11\x5c\x50\x98\x19\xf6\x6e\xd3\xa2\x30\xd7\xd7\x7e\x1a\x60\x3e\x1b\xfd\xeb\x5a\xed\x13\xec\x99\x6e\xc6\x47\x05\xbe\xeb\xdf\x09\x9b\x5a\x14\x9e\xf7\xdd\x5f\xbb\x4e\x95\xe2\x1c\xbc\x89\x63\x74\xde\xd6\x02\x8e\x75\xab\xbe\x6b\x2d\xe0\xaa\xe8\x5d\x95\x3e\x51\x2d\xc7\xc5\xaf\x05\xec\x94\x3d\x4c\x49\x06\x6c\xd7\xd7\x0b\x78\xd3\xef\x4e\xae\xd4\x0a\xd8\x2b\x45\xce\x9f\xf7\x1b\x00\x7d\xdc\x2f\x48\xa0\x21\xf6\xc6\x23\x70\xd9\x11\xf0\xaa\xdf\xc8\x22\xbf\xd3\x6f\x02\xef\x71\x7f\xb1\xd0\x46\xa6\xb1\xd5\xab\xa2\x19\x17\x14\x0e\x6e\x65\x9d\x97\xa2\xf6\x37\x66\xe4\x8d\xc1\xa6\xab\xc1\xa7\xb5\x68\xf9\xf7\x8c\x08\x7b\x17\xb3\x92\x98\x68\x8a\x46\x6c\x85\xde\xe5\x6f\x48\xb5\x9f\xf7\xe1\xaa\x0f\x3b\x7d\x50\xab\x5b\x64\x0e\xbb\x16\xe9\x84\xcb\x4b\x9e\x67\x76\x94\x3e\x08\x52\x3f\xd3\xdb\x18\x25\x17\xfa\x62\xc2\x12\x76\xc1\xc5\x03\xbd\x0d\xaf\x79\x7c\x6d\x2d\xbe\x52\x38\x5a\x8f\x8d\x97\xdc\xb2\x8d\x54\xc0\x82\x00\x23\x9e\xac\x6d\x8c\x35\x2a\x02\x1b\x4c\xb6\x16\xb1\x6a\x80\x85\x2f\x3e\x49\x6c\xff\xb4\x89\x81\x8d\x4f\x53\xef\xba\x26\x20\xbc\xf5\x49\x2d\x9e\xf2\x73\xdf\x4d\x31\x83\xae\x12\x21\x4f\xfa\xdd\x95\x9b\x3f\xf8\x60\xe1\x5a\xa2\x6b\xbf\x92\x83\xe0\x73\x9f\x58\x99\x9c\xc7\x3c\xbb\xe4\x5c\x96\x8e\x53\x71\xca\x02\x74\x9a\x12\x24\xc7\x7c\xb1\xa5\x8f\x1d\x17\x22\x15\xa6\xe9\x9c\x13\xeb\x25\x8b\x62\x1e\x28\x8a\xaa\x9e\xe9\xed\x1c\x1f\xf7\x42\x91\x4e\x74\x3d\x12\x6a\x82\xdf\x74\x3a\xc4\xdd\x84\xfc\xf2\x9f\x3b\x67\x02\xfc\x7d\xe7\x0d\x03\xff\xc0\x69\xe5\x11\xfb\x63\x45\x11\xb6\x90\x6e\xb2\x85\xa3\x7e\x6c\x02\x73\xe6\xe4\x75\x0c\xd6\xff\xb2\x80\x24\xba\x10\x12\x7b\x08\xfa\xde\xd8\x52\xac\xc1\x21\x86\x83\x7d\x89\x15\x87\x90\xc1\x89\xc2\xf2\xb3\x71\x95\x34\xce\xa9\x32\xc9\x25\xb6\xe7\x29\x56\xd1\x66\x98\xab\x71\x01\xfe\xd0\x99\x4a\xf2\x52\x50\xf0\x1f\x3a\xcf\x63\xf0\x1f\x3b\x6d\xa9\x2b\x9f\x38\xbb\xe4\xaf\x17\x7a\xed\x91
\x98\x3f\xaf\xc1\x8c\xb5\x58\x2c\xe8\x33\x91\xba\xbf\x3e\xb0\x28\x71\x7e\x45\x49\x24\x9d\x97\x82\x9c\x44\x94\x0c\xd5\x4b\x12\x7b\x2f\x9e\x8c\xcb\x42\xe8\xc6\x35\x2a\x4c\x05\x41\x76\xbb\x17\x25\x3d\x49\xf1\x8f\x18\x63\xb6\x1f\xcb\x75\xf9\xf8\x92\x3c\xa2\x4e\x42\xc4\x5f\xfc\x2b\xc8\xbf\xf8\x57\xea\xa8\x4b\x57\x5d\x2e\x08\x0e\x09\x22\xa5\x0e\x5e\xb9\x22\x5d\x10\xc5\xd1\xd0\x67\xff\x6f\x00\x00\x00\xff\xff\xc1\x46\x06\xd3\x92\xa7\x01\x00"), + compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xdc\xbd\x09\x7b\xdb\xba\x92\x28\xf8\x57\x64\xb6\x5b\x17\x38\x2a\x31\x94\xb7\x24\x94\x71\x35\xce\xe2\x2c\x27\xb1\x13\xc7\x59\x7d\xfc\xf2\x40\x0a\x92\x69\x4b\xa0\x02\x82\x92\x1d\x4b\xfd\x37\xe6\x07\xcd\x1f\x9b\x0f\x00\x17\x90\x5a\x92\xd3\x7d\xfb\xbd\x37\x73\xef\xf9\x1c\x8a\x2c\x14\x0a\x5b\xa1\x36\x14\xb6\x06\x29\x0f\x65\x14\x73\xc4\xf1\xbd\x93\x26\xac\x91\x48\x11\x85\xd2\xe9\xe6\x1f\x1a\x02\x71\x10\x20\xf1\xbd\x60\x32\x15\xbc\x21\x5d\x4a\x38\x48\x77\x40\x04\xc8\x45\x01\x36\x42\x25\x88\x40\x3b\x20\xa1\x40\x2d\x8a\x0f\x76\x6d\x39\x3a\x24\x80\xe3\xc5\x02\x97\xa8\xfa\x88\x59\xa8\x76\x81\x95\xa8\xe4\x32\xaa\x8d\xd8\x19\x92\x60\xf0\xdb\x15\x48\x94\x5a\x15\xec\x41\x5a\x56\xc0\x96\xb1\xfd\xdd\x3a\x53\xc4\xa0\xa8\xd5\xae\x36\x44\xd4\xaa\x76\x1f\x68\x59\x6d\xba\x8c\xf0\x5f\x40\x09\x45\x29\xd8\xb4\xd8\xc4\x30\x14\x5a\xc4\x1c\x40\x58\x12\x43\x97\x71\xfe\xf7\xd0\x17\x22\x0a\x35\x0a\x6d\x12\x53\x14\x5b\x24\x3e\x84\xb8\x24\x31\x5c\x46\xfb\xbf\x8c\xea\x18\x85\xb0\x4c\xb7\x4d\x38\x45\x91\x45\xf8\x23\x88\x4a\xc2\xe3\x65\xcc\xff\x3b\xdb\x12\xa1\x18\x56\xb6\xc6\x6e\x4e\x8c\x06\x56\x73\x1e\xc3\xa0\x6c\x4e\xb4\x8c\xfc\xff\xb0\x16\x0e\x50\x04\xeb\xda\x68\x37\x72\x56\x63\x73\x3b\x84\x10\xee\xd2\x1e\x77\x07\x48\xbd\xf7\x55\x3d\x48\x5a\x25\xee\x4c\x09\x28\x89\xdc\xad\x96\x01\x56\x94\x42\xcc\x2a\x78\x9b\x17\x84\xb2\xc9\x7b\xf5\xa2\x90\xda\x85\x51\x6a\x95\x9f\x96\xe5\xa1\xec\xc7\xfd\x65\x0c\x40\x6b\x38\x10\xb5\xd0\x0c\x6c\x34\x50\x8e\xd1\xc1\x2a\x44\x10\x2e\xa3\x42\xa1\x85\x2d\xa8\x62\x83\x72\x1a\x3c\x5c\x8d\x0f\xe2\x95\x18\x51\x8c\x17\x53\x2a\x1a\x11\xe9\xa3\x72\x38\xcd\xd0\x0c\x62\x81\xd4\x37\x46\x8e\x84\xa0\x77\x88\x63\x48\x89\xd7\x4d\x0f\x79\x37\x6d\xb5\x30\xbb\x48\x2f\x89\x44\xa2\x95\xe2\x6e\xce\xff\x17\x18\x12\x32\xaa\x60\x2a\xf1\xc8\x12\x0f\x23\x5e\x97\x1d\xf2\x66\x53\xb8\x41\x97\xb5\x5a\x58\x5e\xb0\x4b\x22\x5c\x0a\x82\xa8\x57\xc5\xe6\x37\x62\x7c\x28\xaf\x08\x83\x53\xb5\xb5\xe0\x05\x86\x6d\x82\xea\x15\xe4\x8b\xe5\x82\x5f\x2e\x30\x6c\x6a\x48\x8e\x10\xd2\x8c\x16\x86\x81\x12\xaf\x4b\x0f\x59\x97\xb6\x5a\x38\xbd\xa0\x97\x44\x5e\xd0\xcb\x9c\x82\xf4\x82\x5f\x12\x01\xe9\x02\xc3\xfa\x66\x89\x1c\x6b\xde\x53\xb2\xd5\xc9\xfb\x4a\x96\x7d\x25\x2e\xd2\x02\x2f\xbb\x90\x97\x84\x03\xfb\x7d\x7a\x15\x32\xa6\x91\x09\xa2\x56\x8f\xbc\x48\x2f\x41\x14\x5d\x2f\x7e\x13\x53\xbb\xd3\xf5\x0e\x09\xeb\xb2\x76\xbb\x40\xc4\x6a\x88\x70\xf7\xef\xb4\x75\x45\x4b\x39\x52\x6d\xad\x4c\x8b\xff\xf2\xa8\x68\x8e\xd1\xa2\xa0\x46\xa7\xc0\x9c\xae\xc4\x5c\xcc\x9e\x64\x14\x85\x4c\xb7\xe0\x17\x14\x88\x92\x02\xde\x66\x40\x73\x3a\x5a\x28\x2d\xa8\x3b\x4c\x7b\xf9\xa3\x9f\x62\x0c\x21\xf1\xba\xe1\x21\xeb\x86\xad\x16\xa6\x17\xa1\x1a\xdf\xf0\xb2\xab\x70\x9a\x2f\x69\xfe\xa5\xc5\xd4\x9c\x0a\x8b\xb1\xa7\x2b\x26\x93\x35\x8a\x6b\x3e\x85\x31\x4f\xe2\x11\x73\x47\xf1\x10\xf1\x96\xe3\x37\x9c\xd6\x18\x61\x0c\x62\x81\x4b\xe9\x71\x8c\x72\x78\xe7\x30\xe2\x92\x09\x4e\x47\xc9\x3f\x9d\x92\x6f\x5c\x29\x5e\x2d\xaf\x44\x3c\x6b\x3c\x17\x22\x16\xc8\xb9\x92\x72\x92\xf8\x0f\x1e\x0c\x23\x79\x95\x06\x6e\x18\x8f\x1f\xb0\xd1\xf8\x41\x18\x0b\xf6\x20\x18\xc5\xc1\x83\x8e\xeb\xb9\xde\x83\xab\x88\xcb\x
e4\x81\xd3\xe2\x2d\xc7\x1d\xf7\x1d\x8b\x15\x0d\x6b\x53\x04\x18\xb9\xb8\x84\x94\x4c\x74\x37\x7b\xc0\x70\x37\x6d\x36\x91\x24\xcc\x9d\xc4\x13\x84\x71\x57\x7d\x93\x2e\x05\xe9\x06\xfa\xbb\x35\x9c\x05\xd6\x49\xc9\xee\xa3\x01\xea\x78\xde\xa1\xc4\xf9\x74\x72\x27\x69\x72\x85\x4e\x75\xbd\x18\xb6\xbc\x6e\x34\x40\x9c\x10\x22\x32\x08\xf3\xc6\x89\x83\x6b\x16\x4a\x67\x8b\xc8\xbb\x09\x8b\x07\x0d\x3e\x9f\xf3\x74\x34\x52\xfc\xb1\x78\xca\x8b\x38\x79\xc5\x0e\x29\xc0\x9b\xcd\x2b\xb4\x8f\x61\xab\xd3\xcd\xdb\x96\x36\x22\xde\xe0\xee\xf6\xa1\xd7\x6c\x22\x4e\x06\x52\xf1\x32\xa1\xfe\x55\x84\x70\x1c\x0d\xd0\xd6\x04\x71\xbd\x32\xd5\x1f\xd9\xea\xa8\xe6\x65\x54\x75\xba\x39\x79\x9a\xe5\xde\x90\x11\x1a\x62\x38\x22\xab\x07\x7c\x6b\x98\x4d\xdc\x72\x74\x3f\xe4\x13\x77\x65\xeb\xf2\xee\xd1\x3d\xd1\xf3\x7c\x7e\x28\x7a\xed\x8e\xdf\x51\x7d\xb1\xc5\xdd\xed\xec\x3b\x92\xe4\x03\xe2\x8a\xd3\xba\x14\xe3\xf9\x3c\xfb\x1d\x80\x70\x03\x8c\x7b\xd2\x57\xbf\x42\x10\x6e\x88\x75\xbb\xbb\xdc\x0d\x34\x9f\x6e\x36\xb7\xaa\x65\xbb\x9c\xe8\x72\x8a\x63\x17\x43\x28\xe7\x73\x85\xad\xd7\xf1\x85\x1b\xa8\xfa\x3d\xb3\xc1\x3c\x5f\xd3\x4c\xd3\x28\x7c\xe8\x2d\x30\x5c\xaf\x65\xed\x19\x50\x67\xfd\xf2\xf0\x0e\x3f\xe4\x0b\x7d\x2d\x04\xc9\x41\x30\x3c\x5d\x26\x27\xfb\x5a\xf0\xc1\x43\xaf\xd7\x97\xbe\xe8\x8d\xa4\x9f\xc8\x05\x86\x57\xc4\x2b\x87\xe2\xd4\x46\x7d\x4f\x7d\x0e\x81\x2f\x16\xe5\xf4\xfd\x5e\xe5\x45\x39\x04\x84\xbe\xb4\xa0\xce\x0d\x96\x7c\xd9\xdc\x2f\x74\x8f\x4b\x3d\xcb\xb0\xd9\x22\x2e\xe4\x65\xf9\x52\x98\x97\x42\xbd\x2c\x78\xab\x2a\xfd\x85\x8c\xd0\x99\x35\x53\xce\x0c\x62\x35\x4f\x94\x86\xc9\x87\xd6\xb4\x2e\xe6\x49\x4b\x64\x53\x23\xc8\x5f\x89\xae\x61\xf2\x27\x66\x90\xb3\x11\xee\x5a\x9c\x5a\xcd\x06\xf3\x16\x33\xc2\xdc\xa0\x04\xcd\xc7\x5f\xd3\x73\x4c\xee\xb7\x7d\x6f\x51\x12\x74\x52\xe9\xaf\x6d\xbf\x03\x45\x9f\x29\xf8\x37\x64\x84\x4e\x2c\xfa\x3f\x2b\x36\x95\x57\x2b\xc8\x31\x48\xc2\x33\xde\xdb\x95\xed\x76\x17\x0b\x55\xf1\x85\xac\x6e\x5c\x45\xf1\x67\xd5\xe2\x17\x97\x16\xd9\xc2\x70\x0f\xee\x52\xab\xa4\x82\x7b\xbd\x51\x04\xba\xb8\xec\xea\x35\x20\x95\x74\xa2\x66\x3c\x48\x22\x55\x2f\x18\x74\x7a\x77\xd2\x2c\x8d\x96\xec\xec\xb3\x92\x43\x31\x7c\x24\x48\xd6\x30\x2b\xb6\x56\x30\x15\x1b\x77\xb3\xc9\xaa\x15\x80\xee\x66\x9c\x9a\x6a\xee\xca\x6a\x80\x55\xab\x4a\x55\x55\xe1\x52\x3d\x4a\xe8\xcd\x6b\xa2\x4b\x35\x35\x9b\xe9\xaa\xea\x20\x25\xa9\x1b\x60\x6a\x2a\xbd\xad\x56\x0a\x69\xb5\x62\xaa\x2a\x66\x2b\x2a\x56\xd2\x72\x5e\x75\xb8\xba\xea\x66\x93\xae\xaf\x1f\x28\xa1\x6e\x80\x43\x43\xc5\x74\x99\x0a\xa0\x55\x4a\xc2\xda\xea\x97\x50\x2a\x26\x9f\x91\x9a\x14\x6e\x12\x0b\xb9\x8e\xc5\x68\x6e\xae\x59\xf9\x42\xff\x07\x3f\x2b\xfc\xe8\x37\xb1\x91\x99\x82\xb4\xe7\x25\x21\x24\x91\x3d\xcf\x57\x0f\x7d\xa9\x79\xb2\xae\x60\x1d\xa3\xe2\x2d\x25\x83\xc1\xcb\xb5\xdc\x90\xb7\x37\xc8\x09\xfc\x0f\x5d\xfa\xed\xfa\xd2\x0f\x36\x95\x7e\x20\xe6\x9e\xfe\xfc\x96\xca\x2b\x77\x12\xcf\xd6\xcb\x2a\xff\xce\x57\xe1\xf9\x77\xc2\xf3\x96\x7b\x6a\xab\xed\x5d\xa1\x4e\x07\xfb\xde\xa1\x68\x36\xf9\xa1\x37\x9f\x0b\xb5\x7b\x7a\x87\xbc\x27\x5a\xdc\xcf\xa4\x4d\x5d\x19\x95\x94\xef\x60\xcd\x82\x3e\x11\xfd\x26\x64\xd1\x08\xbe\x99\xe7\xc1\x28\x8e\x05\xfc\x30\x3f\x44\x9c\xf2\x3e\x3c\x31\x3f\x46\xf1\xb0\xbb\xae\x39\xcd\xe6\xa6\xc6\xce\xe7\x9b\xbe\x6e\x11\xa2\x84\x2b\x45\xcf\x7b\xb2\x69\xac\xba\x7f\xeb\xe3\xef\xc9\xd2\x66\x57\xa0\x44\xb8\xe1\x15\x15\x4f\xe3\x3e\x3b\x92\x28\xc5\x5d\x7a\xb8\xbf\xbf\xf3\xf8\x60\x3e\xdf\x3f\xd8\xed\x3c\x3e\xa4\x3d\x64\x4b\xdc\xa0\x44\x70\xdf\x7e\xd5\x12\x17\x69\xab\xa3\xbf\x90\x1d\xbc\x28\x44\xa8\xeb\x38\xe2\xc8\x71\xf0\x46\xc5\xe6\xe2\x12\x2a\xb2\xb1\xd1\x41\x0a\xd2\x94\x88\x13\x2e\x51\x98\xb6\x5a\x10\x56\xa9\
x0c\xe7\x73\x44\x5b\xa6\x80\xa2\x10\x38\xa2\x58\xb1\x02\xbd\xb2\x69\x41\x96\xb4\xc8\xea\xfe\x2d\xbd\x28\xa7\x49\x1a\x9a\xe4\x6f\xd3\x24\x0b\x9a\x8c\x36\xa4\xf6\xb2\x45\x29\x8d\xeb\xd1\x7f\x47\x7e\x87\x16\xa5\x52\x65\x74\xa4\x44\x2b\x55\xb4\x4a\x07\xc3\x5d\x53\x47\x4a\xe8\x3f\xc9\xfe\xc1\xee\x8e\xd7\x6c\xee\x3f\xdc\xdd\xdb\xfd\x27\xa1\x3d\x79\xd1\x6e\xb3\xcb\x56\xea\xa7\x55\x0a\xe0\xcf\x75\x73\x4f\xb8\xc9\x64\x14\x29\xae\xb5\xc0\xf0\x62\x3d\x94\xee\x53\x0d\xf4\x95\xfc\xa6\xb2\xf4\x1b\x93\xd6\xec\xc3\xf7\x99\xfe\xa4\xb6\xe2\xb4\x3a\x15\x24\xd6\x72\x39\x62\x24\xad\xb7\x36\xed\x89\x8b\x76\x5b\x5e\xb6\x98\x5f\x8a\xc7\xde\x22\x97\x93\xb3\x6e\xe7\x9c\xfc\xcb\xc8\xd8\xfa\x7d\x3a\x3a\x8b\x82\x20\x0c\x82\xaf\x13\xcf\xb7\xfe\x43\xb8\x11\xef\xb3\xdb\xd3\x81\xe9\x5c\xb9\x0e\x54\xf1\xc1\x1a\x2c\x5b\x0b\x9b\x8b\x39\x87\x45\x0b\x95\xf0\x3d\xa2\x89\x7c\x55\x20\x20\xc5\xb7\x76\x0e\xbe\xc0\x90\x2e\xe3\x34\x5d\x55\x48\x4e\xd1\x00\xc9\xc3\x4e\x2e\xec\x1d\x5b\x72\x9d\x07\x5a\x0a\x69\x77\x0e\x11\xb3\x69\xd5\xca\x59\x2e\x7c\x30\x0c\xac\x45\x64\x55\xe4\xd0\x23\x45\xd7\x35\x47\x49\x7a\x1e\xf4\x73\x51\x6f\x1d\xa7\x54\x60\x1d\x60\x25\x58\x69\xc4\x5f\x82\xdb\x85\x81\xcf\x61\x98\x4b\x8e\xf1\xa6\xaa\xf7\xb4\xcc\x7d\xe5\xf3\x45\x46\xe8\x60\x6d\xbf\xeb\x8a\x2e\xc4\xa5\x1a\x9d\x80\xaf\x5f\x27\x19\x1c\x48\x0d\x99\xf0\xd5\xb2\x5d\x1d\x18\xd8\xe5\x7a\xf1\x6c\x19\x18\xd2\xcb\x4d\x42\xd5\xaa\x02\x40\x75\x91\x74\x65\x11\xcb\xdc\x58\x2b\x04\xa1\x2e\x46\xd7\x14\xb3\xec\x8a\x4b\x05\x21\xd6\x45\xe3\xb5\x45\x21\xda\x54\x18\xa2\xcb\x55\x9b\x8f\x14\x77\x79\xa1\xbe\x7a\xf5\xfa\xc3\xe9\x89\x3b\xa1\x22\x61\x5a\x30\x0b\xa9\x0c\xaf\x2c\x7b\xf3\xb6\x44\x33\x34\x96\xe0\x9c\x5f\x45\x49\x23\x4a\x1a\x3c\x96\x8d\x29\x1d\x45\xfd\x86\x2a\xb9\xd5\x70\x5a\xdc\x1d\xb3\x24\xa1\x43\x06\x0a\x81\x92\x8d\x46\x6a\x16\xf4\xb9\x35\xcd\xfa\x59\xed\xc9\x2c\xd2\xf8\xdd\x6d\x7c\x1f\xd2\x84\x35\x76\xfd\xcc\x3e\x10\xc4\xf1\x88\x51\xcb\x3c\x20\x7a\x13\x25\x2b\xfa\x63\x8e\x1c\xda\x78\x72\x7a\xfa\xc6\x51\x42\x9f\x2e\xb5\x93\x97\xe2\xe9\x38\x60\xa2\x54\xd2\x45\x4f\x83\xf3\xc6\xab\x93\x73\x05\xee\x23\x71\x48\xda\x3b\x9d\xbd\x87\x7b\x8f\x76\x0f\xf6\x1e\xce\xe7\xe5\xf3\x21\x11\xf3\x39\xf2\xe6\x02\x2b\x41\x04\x37\x9b\x68\x2b\x4a\x8e\x23\x1e\x49\xd5\x15\xf3\xb9\xf8\xf7\x0e\xae\xa3\xd3\x24\x19\x1a\xf6\x6a\x34\xac\x21\xfc\xf8\xcd\xe9\xd1\x79\x49\xf9\x41\x5e\xaa\xae\x36\xe6\xa5\x44\x23\xe2\x89\xa4\x3c\x54\x2f\x3f\x68\x20\xfd\xa5\xe5\x38\x39\xca\x0f\xe7\x67\xaf\x4e\x5e\x94\x38\x1f\xfb\x39\x6f\xcb\x6c\x2e\xaa\x00\x77\x43\x03\xaf\x5e\x96\xb0\xfb\x39\xac\xd5\x92\x87\xf9\x3b\x2d\x24\xb9\x51\x62\x84\x25\x81\x7b\x53\x6e\x2c\x17\xf0\x39\xaf\xfb\xcd\xab\x0f\x56\x6b\x1e\xfd\xba\xe4\x36\xcf\x8a\xf2\xc6\xd1\xd9\xd9\xd1\xd7\xb2\x70\xc7\xf3\x73\xfe\xd9\x5f\x69\x4f\x12\xa5\x15\x69\x3e\xdf\xca\x35\xf4\x9c\xbd\x66\x48\x4f\x9f\xbc\x7e\xfe\xf4\xbc\x31\x8b\xe4\x55\x83\x36\x06\x11\x1b\xf5\x1b\x9c\x8e\x59\xbf\xf1\x3f\x9d\x96\x6c\x39\xff\x53\x57\x68\xb8\x70\x3f\x23\xea\x42\x96\x16\xce\x98\x21\x86\x7b\xcc\xd7\x13\xfd\x4a\xaa\x15\xa4\x95\x1e\x43\x62\xc7\x57\xe4\x31\xbd\xc3\xd5\xdb\x58\x23\xa4\x6c\x5d\x34\x40\xa2\xd8\x65\x64\x05\xac\xf1\xe6\xf4\xe4\xc5\xf3\xb3\x06\xd5\xb8\x1a\x27\x8c\xf5\x1b\x7a\x33\x68\x68\x62\x1b\x41\x2a\x1b\x31\x1f\xdd\x35\x12\xc6\x1a\x4e\x2b\x47\xd3\x72\x1a\x8c\x4b\x11\xb1\x44\x57\xf0\x1b\x2d\x19\xd6\x5b\xb2\xe3\xff\xb2\x8b\x7f\xd1\x40\xd3\xd3\x45\x77\xa6\x40\x89\xd9\xe2\x52\x33\x30\xba\xd9\x57\x34\x39\x9d\xf1\x77\x22\x9e\x30\x21\xef\x50\x8a\xf1\xbd\x45\x6d\x7a\x69\x84\x05\x4d\x2a\xb6\x59\xcc\x95\x84\x34\xa3\x97\x92\x13\x74\x8a\xcc\x2f\x28\x65\xd7\x89\x44\x27\x12\x95\x0d\xda\xf5\x4b
\xdd\x97\xbb\x03\x88\x09\x77\x87\x10\x11\xaf\x1b\x1d\xc6\xc5\x8e\xdc\x6a\x65\x04\xc4\x17\xd1\x65\x36\x38\xd5\xea\x59\x37\x24\x21\x52\x95\x59\x35\x85\x79\x2d\x7b\x7e\x49\x7e\xad\xa7\xf5\xeb\x2b\x5d\x52\xb1\x06\x96\x95\xd8\x2f\xe8\x1a\x90\x63\x08\x14\x55\xdd\xc0\x0d\xba\x01\x09\xdc\x20\x23\x26\x30\x26\x9d\x68\x80\x6a\xa4\x0c\xc8\x89\x42\x08\x83\x82\x18\xd5\x3b\xba\xe5\x03\x5c\x34\xdd\xaf\x31\x67\x63\x21\xca\xbe\x7a\xd6\x0a\xe7\xaa\x55\x96\xa7\x6d\xb3\x65\x7e\xa5\x6f\xe0\xde\xf4\xb0\x61\xe0\xda\x35\x90\xf5\x60\x58\x1d\xc0\xa1\x54\xdb\x8e\x1e\x40\xed\x4e\x08\x5d\x6a\xf5\xa7\x52\x12\x2c\x93\xf6\xb6\xed\xe5\x9c\xa1\x90\x41\x41\xc6\x0a\x9f\xa7\xf1\x3b\x95\xa5\xc7\x55\xc9\xa2\xd8\xa2\x9e\xdf\x4e\x58\x28\x23\x3e\x54\x9b\x92\xde\x8c\x4a\xb3\x3c\x2f\x0c\x76\xcb\x86\x6c\xee\x6e\xab\x1d\xa0\xb0\xe0\x6e\x75\xba\x4b\xfb\x94\xe7\x57\xbb\x9e\xbb\x54\xe1\x71\x69\x37\xdb\xc6\xb2\x7d\x29\xdb\x1a\x32\x5e\x5f\x61\xb9\x5b\x5e\x9d\x5d\xbb\xa1\xc6\x11\xe6\x6c\x38\xe3\xa9\xf9\x82\xcd\xc0\xae\x78\x61\x3e\x2e\x18\x67\x81\xa1\xaf\x31\xf4\x9b\xcd\x65\x28\x8b\x56\xa6\xa1\xd8\x2a\xa8\xdd\x12\x6a\xa0\xa1\x06\xcd\xe6\x50\x41\x0d\x41\xb8\xc3\x72\x19\x14\x50\x57\x1a\xea\x6a\x15\xae\x62\x73\xb1\x10\x58\xd3\x6f\xb8\x5e\x6a\xde\x2a\x45\xee\x72\x10\x2c\xf1\xb9\xcb\x0e\xa5\xf6\x63\xaa\xc9\xa7\x2a\xd6\x3e\xb5\x0b\x76\xb9\xce\xf8\x3f\x59\x2b\x88\x6a\x81\xc7\xec\xbe\xd1\xe0\x0e\x09\x50\x0c\x10\x38\x6e\x39\x8e\x2d\x18\xcf\xec\x19\xc8\x35\xce\xbb\x0d\x62\xab\x34\x7e\x4c\x99\xc9\xc1\xb7\x9c\x28\xb4\x25\xba\x1b\x0b\xdd\xfd\xb6\xef\x01\x55\x42\x73\xf1\xf9\xa8\xfa\xb9\x53\xfb\xfc\xa1\xfa\x79\x07\x02\x9f\x43\xe8\xab\x2a\x8c\x94\xfe\x7c\x83\x94\xbe\xab\xa1\xfb\x5a\xf0\x87\xeb\x0d\x80\x7b\x16\xa0\x6e\xc5\x53\x6e\x9b\xe4\x5f\x69\x22\xb8\xb6\x3c\x03\xf3\x9f\xf2\x56\x2b\x53\x15\x74\x0f\x5e\xf9\x17\x97\x8b\x9c\x43\x1e\x2b\x58\xe0\x65\x0b\x4e\xed\x15\xff\x81\x23\x7b\x99\x73\x74\xc3\xd1\x2b\x05\x80\xb1\xbd\xce\xbf\x67\x04\x72\xf7\xca\x28\x48\x02\x83\x46\xac\x9b\x7c\x5e\x69\x89\x3d\x16\x35\xf4\xdf\xcd\x57\x30\xd5\xe8\x2a\x4c\xf3\xbe\x70\xb2\xd5\x81\x33\xae\x34\xb3\xa2\x52\x5d\x81\xe2\x13\x67\x3c\x33\x64\x63\xd8\xfa\x92\x99\xbb\x55\x09\xaf\xcb\xc9\x19\x77\x93\xab\x68\x20\x11\xee\xe2\x2d\x3b\x70\x43\x3b\x74\x84\x3b\xc8\x34\x66\xae\xd6\x93\xbb\xad\xa6\xb8\x67\xdc\x62\x1d\xf5\x4f\x01\x37\x54\xba\xe7\x50\xb3\x1f\xde\xc5\xc2\x1d\x12\xf5\x33\xd2\x2c\x56\x2d\x1e\xd3\x20\x85\x50\x7f\x08\x90\x42\xa7\xb6\x9b\x1c\x72\xc1\x46\x09\x53\xd4\xea\xf8\x8e\xc2\xcd\xe0\x0e\xdc\x50\x57\x1d\x54\x7a\x42\xe1\xe1\xaa\x0b\xb5\x09\x22\x1a\xa0\x7d\x43\x4d\x46\x9e\x70\xaf\xaa\x6b\x30\xab\x78\xa0\x2b\xbe\xca\x9b\x8c\x75\xa5\x0d\x45\x83\x9a\x5d\xda\x06\xe9\xf9\x1d\xa5\x0a\x2a\x50\x88\x7c\xe1\x0e\x17\x90\x97\xed\x2f\x16\x0b\xc4\x71\x57\xf7\xf6\x62\xb1\x41\x9b\x3b\x51\x03\xc5\x80\xbb\xe1\x13\xf5\xe7\xa1\xfa\xb3\x53\x6e\x08\xcb\x61\x31\xf8\x7e\xb1\xa8\x38\xf0\x4e\x6a\x8a\x9c\xd9\xbb\x66\x68\xc4\x81\x83\xe8\x09\x77\x30\xa2\xc3\xc4\x9f\xc6\x51\xbf\xe1\xe1\xae\xde\xc5\xe6\xf3\x2b\x94\x59\x45\x63\x72\xbf\x80\x88\xa0\x90\x48\xa4\xb7\x32\xb5\x13\x13\x8a\x02\x88\xd4\xa6\xb8\x42\xf7\x07\xa6\xb9\x14\x53\x12\xd0\x1b\x9e\x1b\xa7\xde\x28\xf6\xd4\x4d\x5d\xda\x6c\x22\x24\x89\x9c\xcf\xef\x17\xf8\x82\x5d\x92\xd4\xa5\x48\xab\x49\xa0\x20\x56\x20\x64\xe4\x7e\xa8\x35\x6a\x43\xe2\x02\x52\xc2\xdd\x10\xa8\x92\x91\x41\xc9\x39\x4c\xcb\x39\x83\xc2\x3b\xe5\x5e\x91\x57\x1c\xcd\xd0\x73\x5e\x74\x54\xc3\x8e\x2a\xd2\x5f\x38\xdc\x6f\xfb\xfb\x10\xf8\xf6\x64\x30\xae\x1b\xee\xd2\x8a\x35\xd9\xdd\xee\xdd\x21\x0a\x4c\x33\x37\x3f\x6c\x36\xe3\xde\xad\x0e\xbb\x13\x6e\x04\xc2\xbd\x56\x6f\xef\xf4\x8b\xb0\x27\x5c\x35\xd4\xea\x9
5\x1a\x06\xe0\x6e\x80\xf1\x02\xd9\xe6\x35\xb9\x40\x31\x04\xd6\x00\x05\xa6\xa9\x6a\x4c\x18\x70\xd5\xad\x03\x14\x29\x59\x01\x04\x86\x9f\x1c\xc5\x10\xba\x01\xa4\x28\xc2\x05\x8e\xea\x5b\xa0\xbd\xfb\x49\x2c\x64\xe2\xd3\x85\x7f\x9f\x39\xb7\x38\xb9\x5f\xe8\x01\xfc\xfc\xbb\x3c\x41\xb8\x43\x54\x67\x09\x6b\xf6\x8b\x19\x3a\xe7\xc0\xdd\x2b\xc8\xd8\xb6\xa8\x4e\xb9\x67\x9b\x03\xb6\x34\x33\xbf\xf1\x05\x8c\x14\x43\x2f\x79\xdb\xeb\x3a\x47\x1f\xab\xef\xaa\x0d\x1f\x37\xb2\x72\xee\x73\x88\x6b\x36\x9c\x9f\xc5\x66\xa4\xa7\x10\xa4\xb9\xa3\x53\x4f\xcb\x97\x1c\x6d\x79\x20\x20\xd5\x1b\x1d\x06\xf5\xbb\x03\xb2\xf8\xcd\xf1\xf7\x6c\x3f\xbd\xdf\xf6\x9d\xc1\xad\x03\xd4\x4f\x2f\xd8\xe5\x7c\x7e\x1f\xf9\xc7\x70\xed\x1f\x57\x02\xcb\x5e\x5a\xeb\x36\x93\x92\x44\x21\x25\x75\x7c\xb3\x00\x84\x7b\x03\x94\x20\x4a\x52\x88\x09\x83\x19\xe2\xbd\x37\xfc\x82\x5e\xba\xcc\x37\xff\x0e\x2a\x72\x5e\xe9\x49\x8c\xbb\x42\x7b\xab\x7e\x60\xc5\x3a\x27\x8a\x6f\x14\x5b\xb0\x92\x11\x4b\x6f\x94\x5a\x20\x48\x5e\xa4\x97\xaa\x1a\x0a\x29\x41\xa9\xb6\x35\x63\x8b\x6e\xe0\xbd\xd4\x8d\xc8\x09\xa2\x90\xba\x11\xf6\x53\xf7\x3a\xfb\x71\x8d\x21\xc5\x85\x31\xa1\x54\x24\x84\x3b\xee\x86\x6e\xa0\x54\x02\x37\xc0\xba\xad\x6a\x72\xaa\xd6\x66\x15\x77\x2b\x66\x0b\x4d\x46\xd6\x27\x6e\x0c\x12\xee\x27\xbe\x70\x39\xfc\xf0\xd9\xc2\x6c\x53\x14\xe2\xb2\xf3\xde\xea\xe6\xbe\xe1\x17\xfc\xb2\xd9\xbc\x42\xbb\x56\xbf\x7e\xaa\xce\x3a\x0d\x09\x1a\x92\xdc\x33\xff\x1b\x07\xe1\x0b\xa0\xfe\x0f\xbe\x80\x67\xc5\x1e\xf8\x6d\xad\x94\x53\x89\x5a\xf9\x51\x2c\x78\x09\x21\xb9\xb8\x84\x98\x68\xcc\xae\x50\xdc\x4e\x12\x0f\x6a\xcb\xc3\x0c\x46\xc2\xe4\x79\x34\x66\x71\x6a\xf1\xec\x7c\xb7\xc6\x78\x01\xb2\x18\x0c\xeb\x73\x38\x62\x54\xe4\xc5\x84\xb6\x07\xe5\x50\xa6\xce\x80\x44\xa6\x5d\x6e\xb8\xc6\xec\xdf\x15\xb9\xcf\x12\x97\x22\x60\x08\x29\x89\x91\xd0\x3a\xa1\x51\x4f\x72\xf9\x91\xea\xa8\x28\x7a\x89\xca\x38\xb9\x68\x81\xe1\x3e\x49\x83\x24\x14\x51\xc0\x2a\x6c\x2f\xcc\x77\xf5\x05\xa4\x7c\x35\x08\xe2\x6a\x0b\x08\x33\x83\x3d\xc6\x96\x69\x19\x1f\x7a\xf3\x79\xa8\xfd\x02\xda\x96\xdf\xc1\x0b\xb3\x6a\x9f\xf0\xee\x1a\xce\xb3\x4a\xa1\xd1\xae\x50\x9c\x8b\x59\xef\x39\x71\x52\xde\x67\x83\x88\xb3\x7e\xa9\x9b\xf7\xe3\x30\x1d\x33\x2e\x7b\xf9\x83\x7f\x6f\x39\xfc\xdf\x15\xc2\x11\x9d\x4c\x18\xef\x3f\xbd\x8a\x46\x7d\xd5\xe1\xab\x36\x58\x46\x98\xcb\xe3\x3e\x2b\xb7\x8d\x09\x15\x8c\xcb\x93\xb8\xcf\x5c\xc1\x26\x23\x1a\x32\x83\x60\x2a\x10\xb7\xb7\xdc\x05\x06\x86\xe1\xbe\xc2\x6f\xfe\x5c\x29\xcb\xaa\x96\xbc\xa8\xcc\x47\xdb\x2c\xfa\x0b\x1f\x98\x67\x8d\xf9\x7d\xce\x43\x68\x97\xb5\x48\xea\x06\xf3\xb9\x07\x99\x2b\x2b\x2d\x3d\x6c\xad\xd2\x49\xa5\x99\x6c\xe8\x87\xd0\xf7\x47\x42\x87\x3f\xfa\x12\x06\x3e\x85\xc0\x67\x5a\x42\x40\xd9\x8e\x0f\x5f\xff\x5b\x08\xfc\x3d\x12\x77\x7e\x8b\x44\xe3\x99\x11\x9b\xa4\xf4\x6b\x9f\xeb\x2d\x25\xf0\x3b\x2d\x24\x74\xe5\xb8\x32\x40\x42\xd4\xca\xec\xab\xed\x07\xc6\xbe\x80\x9b\x5c\xb2\x58\xac\x63\x1c\x02\x5d\x70\x10\x97\x2b\xe4\x2e\x23\x37\x66\x93\x56\x8a\xf5\xca\x50\x86\x03\xe4\x2a\x2c\x79\x64\xf2\x42\x5b\xe0\x05\xd9\x20\x10\x16\x78\x80\xad\xc2\x54\x46\x2c\x6b\x5c\xa9\x20\x68\xa3\x03\xc0\x42\x07\xe9\x2a\x84\x76\x24\xf3\xe2\x37\x1c\x04\x15\x84\x40\x57\xa1\xac\x06\x37\x2f\x7e\xcb\x85\x50\x43\x0b\xe1\x2a\xc4\xf5\x70\xe7\xc5\x6f\xba\x19\x96\x90\x43\xbc\x0a\xfd\x72\xfc\xf3\xa2\xe6\x8c\x18\x40\x00\x09\x8c\xa0\x0f\x53\xd8\x86\x31\x5c\x55\xaa\x58\xfa\xba\xaa\x12\x41\x02\x90\x24\x01\x46\x46\x90\x92\x3e\x50\x32\x85\x90\x6c\x43\x4c\xc6\x10\x91\x2b\x78\x44\x08\x41\x9c\x0c\xf0\xaa\x70\x6b\x88\xd6\x05\x5c\xa3\x28\x5b\x45\x75\x77\xc9\x62\x7d\x50\x88\x92\x7d\xa8\xe7\x58\xf2\x15\x06\xba\x61\x15\x3a\xb4\x63\x03\x43\xb8\x11\x76\xa7\x02\x
1b\x6f\x84\xdd\xb5\x61\xbb\xeb\xd6\x98\x06\xdd\x53\xa0\x02\x62\xff\x7e\xa0\x4b\xc8\x45\x85\x0d\x44\xa2\xe4\xd3\x8e\xda\xe7\x26\xd2\x51\xca\x9a\x33\x71\x7c\xbe\x66\xfd\xab\x4e\xd0\xaa\xe0\x76\x6f\x86\x52\x01\x4a\x98\x41\x92\x70\xe0\x24\x61\xda\x77\x19\x63\xc5\xcc\x98\xbb\xad\xd8\x7e\xef\x0e\x0d\x18\xf0\xc3\xdd\x5e\x20\xfc\x44\x40\xc0\x94\x58\xcd\x5c\x8a\xfd\x19\x8a\x58\x66\x83\x5e\x60\xec\x67\xa1\x6f\xc0\x72\x4f\xa1\x80\x60\x5d\x3f\x34\x4e\x11\x37\x5b\xbd\x62\xb7\x0b\x0c\xc9\xda\x1e\x0b\x5f\xab\x49\xe0\x86\xaf\x31\xd0\x23\x5f\xb8\xf4\x08\x68\xaa\xfe\x4d\x2b\x5d\xa1\x79\xae\x25\x67\xde\x2f\xac\x88\xb5\xc2\x20\x45\x81\x11\xe9\x6e\x43\x4a\xa4\xcb\x75\x74\x41\xdc\x55\x83\xb7\x45\x08\xeb\x21\x49\x84\x16\x8b\x91\xfa\x87\xa8\x9d\x51\x0d\x16\x21\x84\x35\x9b\x4e\x38\xa2\x49\xa2\x7e\xa4\xbd\xbe\x40\xd2\x1c\x56\xd0\xe2\x29\xc5\xbe\xf9\x7a\x42\xc7\xac\x80\x10\x06\x42\x68\x88\xc5\x72\x84\x5d\x5f\x54\x64\x7a\xc2\x2f\xc4\x65\x57\xfd\x21\xac\xc7\x5a\x4e\xc3\x69\x49\xdf\x3a\xae\x36\x15\x55\xd3\xda\x76\xae\xdd\x17\xee\x07\x05\xe1\xde\xe8\xe0\xd1\x1b\xc2\x5d\x13\xda\x8c\x73\xcb\x44\x01\xf6\x9e\xbb\xa1\x60\x54\xb2\x73\x76\xab\xc5\x03\x13\xc8\x17\x0d\xd0\x9e\x06\xb3\x2c\xc7\xdc\xbd\xd1\xea\xe9\x75\x57\x7d\x62\xee\x76\x17\x2f\xf9\x17\xd2\x5e\x4a\x2e\x52\x60\xee\xf5\xa5\x9f\x7b\xb9\x95\xf0\xad\x04\x92\x1b\xe3\xda\x26\xf7\xd7\x7e\x0a\x13\x5f\xe4\xc6\x23\x14\x92\xa9\x40\x0c\x94\xfe\xcd\x46\xe3\xef\x6c\xca\xb8\xfc\xae\xc4\x97\xef\x82\x0d\x08\x85\x70\x11\x0d\xd0\xae\x4d\xf5\xb6\x40\x4a\x39\xbe\x42\xdc\x1d\x62\x10\xc0\xdd\x3e\x86\xb0\x5b\x38\x07\x7a\x45\xb3\x9e\x8f\x98\x12\xa5\x4e\x3e\x20\xee\x0e\x40\x3b\xc9\xea\xdf\xb4\xeb\xac\xfb\x84\x37\x9b\x0e\x55\xeb\xc5\x0d\x9b\xcd\xd0\xa5\xfd\xfe\x73\x45\xc8\x9b\x28\x91\x8c\x33\x81\x9c\x70\x14\x85\x37\x0e\x3c\xe1\x28\xc4\x18\x14\x09\x59\xcd\x85\xe1\x32\xd6\x0a\xfb\x0a\x97\xc4\x3b\x8e\x42\x98\x0a\xd4\x51\x8d\xe8\xc5\x17\xd1\xa5\xaf\xfe\x68\x27\x43\x21\xc4\x86\x96\xbd\x5c\x2c\x19\xee\x95\x6a\x27\xed\x68\x98\xae\x62\x49\x6a\x20\x7a\x2b\xed\x14\x84\xbb\x89\xbc\x1b\xb1\x95\xc1\xae\x0b\xc4\x21\xc5\x7e\xb6\xf8\xab\x18\x6c\xbd\x92\xab\x01\x39\x4e\xf4\x2c\xd2\x4f\x6a\x19\x94\xaa\xa6\x2c\xa3\x84\xd8\x25\x84\x44\xa9\x91\x6a\xea\x50\x6d\x62\x0a\xcd\x5f\xf7\x87\xbb\x4d\x08\xa1\x5a\x69\x74\x7f\x10\xda\x0d\x63\x2e\x23\x9e\xb2\x05\x77\x05\x1b\xc7\x53\x56\xed\x68\xa6\x76\xb7\xb0\x34\x96\x44\xa0\x96\xb2\x75\xec\x27\xd7\x57\x06\xee\x0f\x90\xa4\xaf\x59\x07\xf0\xdc\xbd\x22\xb1\xd5\x6b\x90\x12\xed\xe8\x06\x41\xa4\x4b\x81\x92\xb4\x97\x1e\xee\xf6\x84\x4b\x7d\xc5\x44\x7c\x01\x92\x74\xd4\x12\x15\x6e\xe0\xef\x12\x92\x36\x9b\x9a\xa7\x84\x04\xc9\x66\x53\x75\x61\x3c\x79\x27\xe2\x09\x1d\x52\xb3\x95\x01\xda\x59\x02\x4f\xb1\x02\x9d\x08\x3d\x71\x9f\xb1\x01\x4d\x47\x12\x61\x88\x70\x97\x91\xd0\xbd\xee\x9a\xb8\xe1\xe5\x80\x78\x86\x29\x61\x88\xe2\xae\xb6\xaf\x95\x93\xa8\xd0\x74\xe2\x76\xbb\xab\x60\x2e\xe2\x4b\x05\xa6\x74\x94\xc9\x22\x44\x54\x5b\x60\x72\xb9\xc0\xfd\x41\x38\x0c\x16\x48\x00\xc5\xc0\x97\xe7\x2d\x83\x10\x06\xa2\xd9\xbc\x9f\xd0\x24\x89\xa6\xcc\x4f\x54\x9d\x87\x3b\x4a\x32\x51\x8c\x2d\x34\xe6\xbd\xf5\x63\x61\xc0\x72\x31\x52\x4f\x11\x3d\x77\x76\x57\xcd\xbe\x42\x8c\x36\x33\xce\x8a\x23\xea\xb2\x1e\x77\x13\x26\x8f\xa4\x14\x51\x90\x4a\x86\xcc\x09\xb3\xac\x5e\xeb\x35\x5e\x14\xf3\x73\xef\xef\xd5\x01\x29\x61\xee\x40\x73\x9b\x78\xa9\xbe\x93\x0f\x28\x85\xd5\x75\x9a\x4f\x65\xbd\x53\x3a\x4a\x59\xc1\xea\xaf\x58\x78\xc3\xfa\xd9\x4f\x6d\xc8\x23\x24\x55\x6b\x42\x9b\xf8\xf0\x62\x21\xc5\xdd\xfd\x2c\xe2\xfd\x78\xb6\x82\x6d\x48\xc7\x78\x1c\x4e\x35\xab\x74\x8d\xda\x57\x38\x4c\xef\x17\xe0\x64\x03\xe3\xc0\xfd\x90\x49\xdf\x12\x9b\
x06\x82\x6c\x79\x4a\x34\x29\xc3\x34\x2c\xaf\x58\x65\x0b\xb8\x28\x02\xd8\x87\x19\xef\x00\x0f\xdb\x07\x9c\xaf\x84\xad\x1e\xde\x6f\xfb\x02\x84\x2f\x21\xf1\x19\xc8\x4c\x47\x80\x34\x57\x16\x0a\x23\x4c\x19\xa8\x64\xb9\x75\x44\xe5\x28\x89\x0e\xeb\xcc\x19\x13\x57\x62\x82\x92\x26\xd4\x62\x4c\xb7\x08\x31\xac\xa0\xb3\xa5\x7b\x6c\x47\xbf\xb0\x2d\x29\x57\x6a\xef\xf4\x80\x69\x67\x2b\x59\x6d\x23\x52\x3c\xf5\x37\xcf\x49\xa9\xed\xb5\x38\xed\x96\x69\x89\xdc\x55\x4a\x18\x77\xfb\xc0\x7c\x06\x03\x5f\xed\x03\x81\xcf\xdd\x60\xb1\x50\x8c\x81\x92\xce\x22\xb3\x6b\xd1\xcc\xaa\xb5\x5f\xf1\x34\x8f\x20\x56\x95\x43\x44\xc2\xc2\x67\x49\x22\x42\x48\xc1\xe1\x07\xcd\x66\xa4\x56\xea\x80\x84\x17\x91\x9a\x1c\x8a\xb7\xab\x0e\x18\xd8\x6d\x45\x42\x6f\xc4\x37\xb8\xab\x1e\x84\xda\x91\xf5\x86\x15\xd4\xc6\xce\xbd\x01\xe1\xde\x40\xa0\xc6\x4f\x97\xf3\x0e\x83\x22\x72\x4d\xf7\x57\x07\x18\x04\xb8\x08\x4b\xc9\x89\x4d\xd4\xde\x0c\x23\x22\xdc\x6b\xe8\x93\xad\x0e\x4c\x55\x75\x7a\xb3\x9e\xaa\xcd\xba\x4f\xb6\x3c\x58\xda\xb1\x93\x5e\x42\x2e\x12\x98\xaa\x1d\x3b\xc9\x22\xc3\xd5\x8e\x3d\x25\x53\xf7\xa6\xd8\xd9\xb6\x89\xc8\x50\x6d\xaf\x47\x35\xea\x8d\xc8\xc5\x08\xb6\x15\xaa\x91\x41\xb5\xad\x50\x6d\x93\x6d\xf7\x26\x6f\x62\xbf\xd9\x4c\xb2\xe6\x6c\x11\x32\xca\x1e\x7b\xf5\xd9\xe0\x23\xd4\x5f\xb7\xec\x89\xd7\x95\x87\xe5\xf1\x05\xe3\x25\xe4\x17\xf2\x52\xcd\xc4\x0b\x79\xb9\xc2\x45\x88\x12\x18\x61\x3f\x21\x84\x8c\xf0\x7c\xae\xeb\xd9\x01\x06\x23\xd3\xc5\xaa\xdf\x95\xda\x22\x81\xb5\x3a\x4b\x7e\x75\x3d\x08\xdc\xa5\xda\x67\x49\xb3\x31\xd8\xd5\x26\x72\xba\xe4\xa2\xd7\xe8\x26\xc5\x0a\x81\xbb\x7a\x08\xd3\x12\xc4\x6d\x0e\xb1\xeb\x6b\xdf\xf4\x95\xae\xe7\x6a\xed\x32\x09\xc8\x4c\xcd\x92\x3e\x08\x25\x7a\x04\x19\x3d\x7b\x7a\x4e\x74\x03\x22\xdc\xa8\x74\xe6\xda\x2d\xc8\x21\xf7\xcd\xec\xb1\x8d\xd3\x16\x35\x69\xb6\xa5\x77\x0b\x67\xb5\xda\xdb\x32\x4f\x71\x0f\x21\x6a\xd7\x8e\xad\xca\xa9\x52\x74\x73\xbe\x80\x7d\x8b\x62\xeb\x90\xb2\xa8\x09\x18\xb9\xa2\x56\x86\x95\x70\xac\xc4\x9a\x2d\xbd\xb5\x2a\xf1\x24\x7f\xda\x2d\x9e\xf6\xf4\x53\xcf\x04\xa1\xf4\x50\x4c\xf8\x45\x7a\x89\x95\xe6\x68\x22\xa8\x71\xb3\x99\xf1\xef\xac\x44\xce\xbf\x0d\x0f\xca\x64\x1e\xd9\x6c\x22\x14\x92\x18\x2b\xe1\x04\xc5\x84\x62\x77\x5b\xbb\xb8\x43\x97\x42\x9c\x9d\xe4\x42\x8c\x30\xe3\xcb\x31\x72\x7d\xe5\xb7\xec\x65\x02\x98\xec\x39\x4e\x2e\x4a\x49\x55\xc1\xae\x79\x6b\x78\xa9\xd6\xd5\x14\x5b\x1a\x40\x9c\xb3\x57\x7f\xf9\x10\xd1\x45\x7a\xa9\xd0\xa8\x9d\xc2\xcf\x3a\x39\x3f\xf5\xa6\x6a\x84\x54\x75\x76\x9d\x20\xdd\x6d\x51\x16\x8d\xa3\x7b\xaf\x42\x74\xa4\x18\x62\x64\x1f\x1b\x2d\x0f\x7f\x5b\x3c\x3c\xe7\xdc\x4c\x73\x6e\x06\x9c\xa4\x39\xa3\x13\x84\xe6\xcb\x4c\x1c\xf2\x9e\x1e\xd4\x03\x60\x70\x3f\xf5\x05\x44\xbe\x3e\x37\xe1\xf3\x43\x91\xcd\x83\x87\xe6\x13\x07\xe6\xd3\x45\x29\x16\x87\x84\x1f\x8a\x9e\xd6\x5c\x89\xd7\x8d\x0f\xc3\x6e\x9c\x07\x9a\x44\x24\xbd\x88\x2f\xbb\x43\x81\x22\xa0\x17\xf1\x25\x48\x68\xb5\x4c\x5c\x6c\xa4\x0d\x5d\xd6\x2c\xbd\x15\xab\xcf\xfa\x00\x25\xf7\x8b\xdc\xce\x6d\x04\x70\xd5\x8c\x41\xc1\xa0\x21\x20\x51\xfe\x98\x10\x0f\x46\xc4\x83\x3e\x61\xdd\xe4\x70\xd0\x6c\x8e\x0e\x83\xcc\x79\x3b\x85\x6d\x82\xa6\x24\xbe\x48\x2e\xb1\x4b\x61\x4c\xd0\x35\x89\x2e\x46\xfa\xc7\x15\x99\xba\x01\x0c\xc9\xb5\x1b\x28\xc6\xbe\xbd\x45\xc8\xd8\x94\x9a\xc0\x0c\xee\xe0\x16\x6e\xe0\x08\x3e\xa8\xc2\xad\xce\x25\x3c\x57\x05\x5b\x1d\xbd\x09\x7c\x68\x36\xd1\x8c\x7c\x70\x03\xb8\x23\x63\x35\x4d\x27\xe4\x83\x9a\x5f\xf0\xbc\xd9\x44\x37\xe4\xb9\x1b\xc0\x11\x51\x12\x32\xba\x25\xcf\xf5\x87\xa3\x66\xf3\x0e\x0f\x05\xba\x82\x1b\x48\xa1\xd5\xea\x63\x38\x12\x3a\xd7\xc4\x36\x0c\x61\xa4\x44\xb2\x7e\x8b\x5c\x19\x2b\xe4\x87\xfc\xcb\xcc\x40\xf6\x5b\x64\x66\xbe\x24
\x2d\xb2\x03\xa3\x16\xd9\x31\xf2\x65\x34\x40\x47\xb8\xdf\x6a\xe5\xb8\xc6\x39\xae\xa2\xa6\xbe\x8d\x37\x69\x91\x4e\xb5\xf4\x1d\x2e\xea\xba\x2a\xea\xca\xa0\x87\x02\xcd\x60\x98\x53\xbb\x4c\x43\xa7\x9b\x3b\xae\xb7\x3e\xcc\xe7\x93\x2d\x42\x6e\x71\x20\x18\xbd\xe9\xd6\x71\xd6\xa9\xab\xd5\x71\xb3\xbe\x8e\x9d\x85\x91\x64\x75\x7b\x6c\x5a\x8a\x16\xb5\x60\xd4\x6a\x2d\xb4\xcb\x21\x39\x1c\x74\xf3\xf6\x58\x83\x6e\xc6\x79\xb9\xa0\x39\xb9\x59\xce\x95\x6b\x78\x4a\x9e\xce\xe7\x17\x97\xdd\x8c\x5e\x6b\xae\x5c\xbb\x01\x64\x02\xd5\x53\xac\x6b\x44\xde\x61\xbe\xa4\xe6\x73\xef\x30\x2c\x9e\x9f\xe6\x1c\xf4\x91\x5a\x39\x33\x3f\x85\x5b\x3f\x84\x3b\xff\x69\xe6\x4c\xba\x11\xc4\xf9\xce\x46\xe3\xcf\x07\x4f\xde\x58\x39\x6d\x8e\xc4\x2a\xb7\xb7\x3e\xda\xa8\x7a\x38\xcc\xf7\x8e\xec\xbc\xd9\xbd\xf0\x53\x38\xf2\x43\x72\x1f\xfa\x1e\xfc\xf4\x19\xa8\x17\x49\x61\x19\xce\xe4\x0c\x55\x9e\x84\x5a\x8d\x52\x7a\x6a\xe8\x86\xf8\xbe\x86\x61\x81\x21\x74\x43\xb2\x93\x79\xcf\x2b\x82\x4b\xe8\xfe\x04\x06\x31\x84\xae\x50\x50\x82\xa4\x06\x6d\xe8\x26\x6e\x42\xee\x67\x7e\x6c\x30\x2c\x72\xea\x5b\x37\x22\x37\x93\x96\x81\x2f\xcb\x3b\x51\xd1\x2e\x5a\x04\x2e\x50\x43\x5a\x4e\x48\x58\x25\x84\x01\x75\x7f\x42\x08\x69\xb6\xb9\x5f\x09\x24\xe0\x31\xa4\xaa\x83\x43\x38\x52\xac\x69\xf1\xa1\x4a\x83\x09\x71\xb8\x17\xa4\x80\xcd\xad\xf3\xba\x53\xee\x43\xbf\x63\xf5\x9b\xb0\xf7\xce\xe7\x16\x57\xda\xb2\x5c\xf2\x35\x6b\xac\x61\x76\x4a\x48\x35\xd6\x30\x12\xb9\xc2\x4c\x2a\xb5\xb7\xd2\x6c\x5a\x05\x90\x90\xc8\x08\xcf\x6a\x08\x92\xde\x73\x45\x8f\x74\x6f\x20\x72\x13\x88\xb1\xff\x48\xbf\x45\x91\x2b\x89\x80\xc8\x4d\x49\x0c\xde\x21\x52\xfc\x2d\x71\x67\xb8\x10\x13\x4d\xf5\x01\x78\x59\xf5\xd8\x7f\xbc\x5c\x10\xa9\xba\x12\xb5\xad\x24\xee\x91\x9b\x10\x61\x50\x6d\x46\x84\x7d\x1b\x07\x86\x2d\xa4\x5a\xd5\x6a\x69\x47\x2c\xd2\xcd\xc2\xff\x2c\x26\x60\xaa\x67\xf1\x88\x48\xd3\xa6\x3d\x2d\x7f\x15\xcc\xbb\x4f\x64\x26\x54\xf6\x8d\x50\xd9\x2f\xc5\x44\x55\x71\xdf\x74\x60\xab\x03\x21\x88\x15\xa6\x21\xb3\x46\xa6\x44\xba\xac\x9b\x0b\xa9\xe1\x55\x34\xea\x9f\xc4\x7d\x96\x14\xdb\xcf\x98\x78\xdd\xf1\xe1\x34\xdf\xc8\xc6\xf9\xde\x73\xa5\x34\x7f\x32\xea\x4d\x2f\xc6\x97\xbe\xfa\xa3\x39\x7c\xab\x45\x5b\xc8\x2c\x7c\xbd\x14\xe8\x21\x19\x34\x9b\x83\x43\x32\x6c\x36\x51\x4a\x38\xda\xbe\x18\x5f\xc2\x55\x36\xb6\x43\x28\xfa\xa0\xd6\x03\x45\x17\x74\x29\x19\x2e\x8a\xfe\xc8\x75\x33\xf0\x40\xb8\x01\xd8\x79\x55\xae\xc5\x92\x43\x44\x9b\xea\x72\x11\x99\xfb\xc8\x9a\x6c\xf0\x54\x3d\x4b\x3b\xe8\xf1\xa9\x58\x25\x28\x0b\x5b\x50\xae\xeb\xca\x12\x58\x2d\x2a\x65\xc9\xc7\x5f\xc8\xc1\x2b\x8d\x44\x84\x5b\xae\x45\x90\x64\xaa\x27\x2c\xee\xca\x15\xe3\x35\x9f\xa3\x55\xaf\x8d\x95\xa9\x3e\xb6\x5d\xd6\x6c\xca\x2d\x42\x78\xb3\x59\x73\x59\x4a\xe0\xd6\xe9\x68\xed\x89\x4f\x40\xb8\x69\x2d\x4e\x3f\x33\xa2\xb9\xa9\xfa\x8e\xa1\xee\xc8\xe7\x39\xd2\x67\x54\x52\xe4\x01\x2f\x64\x1e\x0b\xba\x10\xeb\x4d\xd7\xba\x49\x5d\x98\x5f\x45\x7a\x6f\xd5\x4b\xf7\x9a\x08\x37\xf1\x57\x7d\x22\xf7\xd7\xbe\x6a\xc2\xc4\x17\x6e\xba\xc8\xab\x3e\xf0\xed\xb3\x59\x49\x96\xe1\x45\xba\x91\x0e\x8b\xcc\x2d\x18\xa6\x47\xb8\x35\xed\x2f\xa4\x3b\x2d\x45\x3e\x9e\x87\x9a\x96\x02\x13\xd2\xf8\xb0\x16\xf7\x2a\x05\x19\x51\x45\xbb\xac\xd8\xab\xb2\x9a\x22\x9e\x30\x21\x9f\xb0\x41\x2c\x18\x9a\x0a\x94\xea\x58\x4c\x37\xc5\x40\xeb\xf5\x3c\x56\x2a\xcc\x56\x56\x03\x2e\x8d\x08\xb6\xf3\xd9\x22\x5b\xf5\xb3\x61\xe0\xd2\x3d\xb2\x95\x95\x86\xb7\xa5\x36\x20\xa1\xed\x6a\x6b\x0b\x87\x6e\x42\xcc\x2a\x70\x67\xc5\x90\x3d\x5a\x35\x5d\x73\x2b\x89\xe9\xc8\xea\x97\x68\x60\x9b\x1b\x24\x29\xcc\xc3\xcf\x32\x37\xfc\xb1\xa0\x43\x6d\x27\x2e\xd2\xec\xd8\xfd\x93\x8b\xcf\x17\xec\xd2\x3d\xea\xbe\xe3\x4a\xb3\x24\x84\xa4\x6e\xd8\x4
b\xdd\xc4\x57\xfd\xe5\xfe\xd4\xdd\x65\x45\x49\x2d\x90\x74\xef\x4c\x3e\x80\xa2\x01\x65\x52\x0c\x22\xdd\xdb\x2c\xe8\x21\xb5\x83\x1e\xb2\x7d\x3e\xbd\xa0\x4a\xd2\x0d\xdd\x23\x88\xc9\x8e\x36\x44\x84\xbd\xd8\xd4\x15\x67\x75\x75\x6b\xc3\x16\x43\x65\xa8\x43\x57\x5c\xe2\x05\x6b\x36\x75\x54\x01\xb3\x82\x6e\x4c\xca\x86\xea\xd1\x11\xe1\x26\x88\xe3\x6e\xdf\x58\x36\xfd\x2b\xd4\xf1\xf0\x62\x81\x52\x9d\x89\x84\xe8\x25\x8a\x38\x61\x45\xfb\xac\x30\xd0\x57\x22\x8b\xae\x34\xb9\xa5\xd4\x94\x3f\xbf\x9b\xb0\x7c\x6a\xfc\xc9\x11\x77\x25\xbb\x95\x4f\x63\x2e\x19\x37\x47\x0b\x3b\x5b\x6b\x40\x1d\xa7\xec\xa4\x3c\x87\x01\xcd\x4d\x78\x09\xd4\x4f\x8e\x5a\x07\x47\x05\x39\x41\x33\x14\x0b\x48\x5d\x4e\xc7\x0c\x52\x57\x6b\x88\xda\x23\x52\x1e\xe6\xe7\xae\xa4\xc3\x13\x3a\x66\xae\x8c\xdf\xc4\x33\x26\x9e\xd2\x84\x21\x0c\x21\x39\xd6\x9a\x45\xd9\x81\xc0\x4a\xeb\x8f\xae\x2b\x24\x27\xe8\x95\x40\xf1\x05\xbb\xc4\x10\x16\xfd\x79\x87\x5e\xe8\x13\xb0\x10\x56\xe2\x34\x04\x70\x90\x96\x03\x58\x87\x31\xea\x14\x21\x4f\xd4\x9f\x87\xea\x8f\x15\x08\xa9\x0f\xca\xe7\xd1\xfb\xe1\x63\x48\x49\xa8\xbb\x07\x28\x79\x25\xac\x28\x98\xf3\x4a\x2c\x47\x61\x26\x67\x7a\xa1\x91\xb1\x12\x54\x85\x1a\xb1\x6b\x81\xd4\xde\xa5\x76\x0f\xa5\xf2\x2d\x4a\x27\xff\x69\xc5\x3d\x6f\xc8\x64\x55\x32\xd9\x5a\x32\x19\x70\x2b\x54\x62\xa6\x4d\xe3\x33\x73\xbc\x40\x93\x1d\xaa\xa5\x25\x23\x39\x62\x10\xab\xc7\x20\xee\xdf\x41\xa4\x9a\x10\xaf\x6f\xc2\x13\x4e\xd2\xae\x69\x07\xd5\xe9\x04\xc8\x0b\x8e\x1c\x55\xd4\xc1\xe8\x18\x23\xe1\x06\x8f\xb2\xd6\x45\x6a\xdf\x89\x55\xeb\x62\x88\x40\x4d\x6b\x88\x88\x84\x27\x9c\x78\x10\x6a\xa3\x4a\xb8\xd7\x6c\xa2\x9c\x08\xa2\x4f\x5a\xef\x61\xd3\x7c\xf8\x2e\x56\x06\xe5\x08\xf6\x23\x65\x89\x3c\xe2\xd1\x58\x7b\x00\x8e\x05\x1d\xb3\xde\xca\xb7\x95\x98\x22\x2b\x96\x8a\x43\x87\xed\x3e\x38\xf0\xb0\x15\xcd\x73\x2e\x90\xb1\xc4\x22\x99\x9d\xae\xb1\x23\xaf\x29\xc2\xf7\xa9\x96\x4e\xd2\x9e\xe7\xa3\xef\x02\x51\x0c\xda\xd3\xda\x29\x16\x59\xed\x94\x1f\xe1\x20\x7a\x48\xc3\x68\xf6\xa3\x85\x95\x8e\x12\xd7\x3c\xf3\xd3\x20\x49\xc9\x8e\x1d\xa1\xff\x45\x94\xce\xf9\xe7\x4c\x75\xcd\x28\x0e\x75\x8b\xdc\x2b\xb5\x09\xbb\x74\x3e\xbf\x42\x1d\xbc\x58\x1b\x2b\x49\x63\xb8\x66\x95\x70\x32\x7c\x2f\x9a\xcd\xab\x28\x91\xb1\xb8\x73\x87\x31\x12\x18\x38\x32\x19\x20\x74\x4b\xcf\xd6\x7a\x81\x57\x63\xcb\x51\x29\x45\xe4\x83\xa4\x92\x69\x9b\xb9\x03\x16\x5e\x38\x16\x6b\xb3\x33\x6c\x46\x9a\xc9\x00\xeb\xf0\xde\xd7\xcd\xf9\xb6\x75\x7e\x01\x2b\x3c\x26\x7e\x35\x58\x19\x4e\x56\x4f\x2b\xe3\x2d\xe8\x99\x7f\xfc\x63\x61\xfb\xfd\xab\x41\x2c\xa7\x1c\xd5\x82\xf5\xec\xd4\xa6\xf8\xfe\x15\x47\xa9\x0e\x2e\x2b\xf3\x9b\x2e\x7b\x80\x44\xd5\x03\xa4\x4f\x62\x5b\x84\xca\x35\xbe\x1f\x13\xce\xb7\x2a\x86\x22\x3b\x1f\x64\x9f\x90\x12\xb8\x77\x2b\xb5\x17\xdf\xbf\x91\xb6\x2f\xfe\x4d\x36\xd7\x57\x44\xe6\x0a\x7c\xff\x5d\xd8\xe3\x62\x82\xe8\xf3\x00\x38\x77\xc8\x64\xe6\xa4\x7d\x72\xf7\xaa\xaf\xd6\x8a\x40\xbc\x77\xc3\x91\xe2\x69\xd8\x3f\xe2\x68\xa4\x1d\x7e\x66\x11\xeb\x88\x60\x51\x8d\x08\x2e\x83\xf0\xde\xd4\x59\x4b\xbe\x5b\x5d\x88\x4b\x84\xe1\xd5\xa6\x98\x60\x49\x96\x83\x59\x4e\x84\x9b\x84\x22\x1e\x8d\x34\x24\xbc\x5a\xd4\x83\x2a\xab\x2d\xd3\x61\x94\x12\x61\xeb\x3c\x82\xdc\x10\xef\xb1\x9e\xdc\xac\xd6\x37\x6c\xa0\xd4\xb0\xfc\xe7\x79\x3c\x21\x32\x6b\x84\xc2\xfd\x4c\x90\x5f\xe5\xae\x29\x24\xde\x90\x88\x16\x3d\x2c\xad\x7e\x31\xf1\xba\x61\xb3\x19\x1f\x52\xb3\x89\x46\x4a\x9a\x29\x13\x00\x28\xf5\x9e\xf0\x8b\xb8\xd5\xd2\x8e\xb0\x0b\xd1\x6a\x5d\x36\x9b\xa8\xe3\x11\x12\xf5\x90\x6c\xb5\x80\x91\x0e\xf6\x11\x6b\xb5\x40\x67\x88\x20\x04\x1d\xec\xee\x3d\x7a\xd4\x8c\x70\xaf\x56\xce\xef\x94\xfe\xef\xef\x28\xec\x09\xbf\xdd\xc9\x22\xbc\xe0\xf5\xa6\x88\xb3\xc3\x
42\x2d\xaa\x56\x21\xab\x94\xe2\x1e\x47\xd2\x4d\xd2\x20\x91\x4a\x31\xd9\xc1\xb8\x27\x5a\x3b\x7e\xbb\xe3\x73\x24\x2f\xc4\x25\xee\x39\x7f\x71\x6d\xae\xbd\x10\x97\xbd\xf6\x8e\x2f\x5a\x1d\xf5\xb5\xdd\x59\x60\xf8\x28\x36\xa5\x77\xa8\xd4\xa3\xa4\x9b\x05\x86\x9f\x62\x65\x86\x85\x2e\x2f\xb5\x30\x9e\x0b\x72\xb2\x9a\x56\xc1\xf8\xaf\xe5\xe1\xde\xa3\xf9\x7c\xff\x61\x99\x9c\x8d\x97\x52\x15\x86\x97\x62\x63\xe6\x0c\xaf\x5b\xf6\x4b\x57\x94\xc2\x69\x8d\xd8\xf6\xde\x23\xed\x9e\x3b\xf4\xe6\x73\x7e\x48\xd2\xcc\x12\xc7\x08\xff\x83\xb5\xd2\x45\x11\x93\x23\xcc\x38\xbc\x15\x1b\xd2\x46\x78\x2b\xdb\xc6\x56\xb5\x6d\xef\xd1\x3f\xd9\x7c\xce\xfe\xb9\xff\x10\x47\x03\x74\xb0\x6f\x7e\x3d\xf4\xb4\x7c\xc8\x0e\x1f\x3f\x9c\xcf\x3b\xde\xce\x21\xcb\xc8\x91\xa4\x73\xf0\x87\x6c\xb1\xf6\xa3\x87\xc6\xae\x57\xbc\xd8\xdf\xef\x56\x5f\xec\x3d\x2a\x89\xe6\x3a\xd4\xb0\xfb\xab\xc9\x9f\x5a\x39\x19\xf4\x84\xa6\x87\x5e\x2f\x5f\x01\x3e\x6d\xf1\xd2\xee\x1d\x66\xc6\x99\xb8\xb6\x0c\x5a\x2d\xdc\x55\x93\x3e\xee\x21\x46\x3a\x20\x4d\xa6\x98\xa5\x49\x1f\xe3\x66\x53\xc1\x2e\x8a\x69\x4e\xb3\x19\x6e\x32\xf3\x54\x7a\xd7\x8e\x36\xac\x31\x4a\x13\xd7\xc1\x09\x67\xb3\xc6\x97\xb7\x6f\x5e\x4a\x39\x39\x33\x62\x88\x1a\x39\x38\x1a\x20\x49\x28\x56\xda\xf2\xb2\x0f\x7a\x22\xe2\xa1\x60\x49\xe2\x54\x38\x4a\xde\xc6\xa7\xf1\x78\x92\x4a\x1a\x8c\x58\xb3\xf9\x4a\xad\x17\x8a\xee\x43\xea\x2b\x61\x80\xf6\x59\x1f\xc2\xc0\xe7\xae\x8c\x25\x1d\x99\xdd\x60\x45\x90\x81\xc3\x84\x88\x85\x53\x89\xf9\x43\x47\x1c\xdd\x0e\xd6\x96\x90\x46\x3c\x5a\x2e\x73\xb3\xbe\x8c\x22\xa8\x56\x60\x95\x9a\xb7\xe2\x58\x46\xf8\x48\x7b\xfd\x93\x49\xcc\x13\xf6\xf1\xec\x0d\x04\xef\xfd\xfb\x70\xe0\x73\x37\x91\x54\xa6\x09\x84\xaf\x8b\xe7\x73\x76\x2b\x17\x10\xde\xae\x38\x3e\x33\x8a\x4d\xf2\x93\x32\xc1\x5b\xb9\x14\x78\x96\x3e\xc6\xf9\x4b\xfc\xc5\x1d\x0c\xab\x33\xd9\x00\x85\xd0\x28\x25\x4a\x85\xcb\x27\xa1\xe3\x37\x1c\xdc\xf5\x0e\x63\x2d\xb7\x85\x19\xc7\x8a\xf8\x10\x79\x10\x2b\x09\xda\x7e\xb5\xd3\x8a\x31\x08\x72\x87\xce\x07\x76\xee\xef\x72\x8f\xb8\x95\xe8\x48\x69\xb5\x3d\xda\x72\x40\x67\x6c\xa0\x3e\xc5\x0b\x7d\x44\xb6\x08\x3d\x43\x5c\x6d\xaf\x47\xa3\xd1\x59\xd6\x2b\x2f\x19\xed\x33\x91\x20\x8c\x21\xb0\x7b\xcb\x1c\xe7\xd2\xbe\x49\xd3\x3f\x87\x3b\x9e\x37\x9f\xef\x7a\xde\x21\xc9\x5f\xe1\x82\x2d\x2a\xd1\x9c\x94\x85\x55\x5f\xc2\x11\x47\xb3\x81\xda\xa7\xbb\x82\x08\x24\x6b\x52\xc3\x8d\x89\xfd\xf3\xd1\xfa\xc2\x68\x32\x30\xb9\xca\xd4\xf6\x89\x38\xa4\x6e\x38\x71\xa9\x16\x29\xa5\xb8\xbb\xe7\x6e\x3c\x61\x1c\xa5\x6e\xf8\xa7\xfa\xf4\x08\xb6\xbc\xe5\xcc\x16\x7a\x6e\xdd\x0d\x14\xd4\x23\x85\x66\x6b\x7d\x26\x9c\xf0\xb6\x2b\xdd\xa0\x6b\x12\xd2\xe9\x20\x92\x6c\xa5\x99\x2e\x52\x2b\xc4\x24\x4e\x73\x03\xa5\x69\x17\xe4\xde\x4d\x34\xe3\x9b\xb8\x01\x70\x77\x16\xc9\xab\xa7\x82\xf5\x19\x97\x11\x1d\x25\x44\xb8\xfd\x40\xad\x52\xe1\x86\xbb\x58\x69\xcc\x6e\xb6\x02\x54\x91\x5d\x97\x9a\x86\xe5\xa9\x0b\x52\x37\x78\x54\x46\x61\x24\x8c\xf7\xd1\x87\x01\x62\xb8\x87\x56\xd0\xe3\x64\xea\x73\x5b\x51\xe0\x98\x83\xf4\xcc\x0d\xb0\xaf\x9f\x6c\x21\xc4\xa5\x41\x2c\x24\xc2\x8b\xba\xb4\x53\x0d\x35\xf5\x20\xf0\xa5\x1b\x00\xad\x2c\x01\x4e\x14\x6b\x28\x8d\x6f\x33\xf4\x7c\x50\xa4\xa7\xd7\x74\x7f\x13\x6b\x52\xe6\x38\x43\xa7\xcb\xdd\xf0\x6d\xb3\x89\x64\x8b\x38\x63\x47\xad\xef\x30\xcc\x7e\x46\x8e\x19\xc7\x72\xfa\x9e\xb1\xe1\xf3\xdb\x89\xa6\x6a\x79\x24\x6f\xa4\x92\xac\x7f\xac\x97\xf9\x79\x3a\x1a\x69\x85\x6f\x9c\x95\xdc\x9c\xad\x16\x32\x2f\xa2\xa7\x56\xa6\x09\x35\xc9\xd3\x03\x41\x4c\xda\x9d\x2e\x6d\xb5\x0e\x79\xb3\xa9\xc3\x61\xd9\x2d\x0b\x51\x88\x71\xb3\x19\x6f\xd9\x90\xdd\x12\x61\x54\x44\x70\xb5\x3b\x30\xc8\xc2\x55\x22\xb5\xbc\xa3\xdc\xce\x4f\xd8\x45\x74\xd9\x1d\x5c\xb4\xdb\
xd1\x25\x09\x94\xe0\x1c\x68\xb1\x39\xcd\x33\x04\x5e\x07\xc0\x2e\xbc\x4b\x60\x86\x45\x00\x85\xcf\x3a\x23\x80\x09\x46\xc9\x2b\x2d\x56\x73\xf9\x8a\x48\xc8\x72\x17\x4a\x7b\x5b\x31\x89\xb8\x32\x0b\x92\x57\xa4\xd2\xcb\x15\xa0\x9a\xac\x19\x0d\x50\xd8\x6a\xfd\x93\xa4\x85\x14\x62\x19\x5e\xa8\x18\x6a\xe9\x3c\x0f\xe3\x68\xef\x42\x9e\x89\x5a\xa8\x46\x8a\x22\x7d\x54\x01\x79\x21\x2e\xbb\xf2\xa2\xdd\xd6\xe1\xac\xb7\x12\x31\xdd\xd8\x22\xcd\xbf\x6e\x2e\x87\x12\x7c\xa9\x8a\x9d\x4b\x08\xe1\x73\x21\xea\x63\x78\xb2\x59\x12\x92\xf9\x98\xda\x63\x59\x19\xd9\x2e\x2f\x78\x72\x98\x0f\x2b\xc3\x99\xff\xcc\x48\x20\x45\x00\xab\x39\x18\x44\x21\xe3\xd7\x18\x57\x11\x17\xd6\xfd\x1a\x3c\xc6\xb0\x72\x60\xde\xaf\xd5\x80\x79\x73\x53\xf6\xbb\x8d\xc9\xf5\xfe\xc7\x86\x92\xe2\xf0\x90\x2f\x70\xf7\x78\x6d\xbd\xe2\x9f\xff\xe4\x1b\x92\x28\xff\x53\x7f\xee\x56\x72\x37\xb2\x75\x47\x2d\x8b\xb3\x64\xaf\xb8\x64\x62\x4a\x47\xb6\x16\xf4\x8a\x23\xb6\xf1\x24\x59\x51\x48\xe0\xda\x59\xdf\x77\x56\x74\x7b\x63\x86\x9e\x4a\x70\xfe\xe2\x8d\x46\xa3\xe1\xc0\x0c\xbd\xd2\xbf\x1c\xe0\xb6\x3f\xe4\x4f\xbb\xc4\x1d\x3a\x95\x6b\x1b\xd8\x52\xe2\xbe\xa7\x38\x59\x51\xf8\x85\x5d\xf8\xf1\xc3\x43\x82\x38\x39\xd6\xc7\xb8\x9a\x4d\x7e\x48\x3a\x3b\x3b\x25\xec\x57\x0b\xb6\x00\x3b\x24\x8f\xbd\x66\xf3\x60\xff\x90\x58\xf6\x50\x2e\x57\x42\xee\x3f\x6c\x36\xf7\x1e\x55\x20\x85\x05\x69\x88\x99\xcf\xbf\x9a\x7f\x34\x12\xeb\x3a\x10\x59\xc9\x5c\x50\x5e\x91\x51\x79\xef\x52\xeb\x66\x8a\x35\x25\xa8\xf5\xde\x71\xb4\x4e\xa3\xd7\x87\x24\x6f\x20\x96\x64\x1b\x22\x49\x50\xc5\x52\x61\x1d\x64\x16\x6e\xa8\x53\x17\xf7\x21\x5d\x37\xcd\xee\x50\x2c\x81\xbb\xdb\x3d\xe9\xa7\x3a\x76\x9a\xaa\x11\xb6\x3f\xa6\xa0\xff\x31\x88\x8d\xfa\xb1\xe6\xcc\xa0\x89\xaa\x6d\xef\x68\x0f\xd9\x76\x35\xe1\xae\x92\xbc\x14\x19\x1c\x28\xb9\xd3\xe6\xf5\x00\xa4\x1b\xc2\x1d\x8a\x34\x66\x90\x2e\xd3\xe9\x96\x53\x10\x84\x82\x24\x4c\x9f\xff\x18\xac\x92\x1c\x1b\xba\xd0\x5a\x3d\x70\x86\x42\x09\x26\x79\xb1\x56\x3c\xe0\x58\xcd\x22\x08\x56\xa2\xe2\x84\xc3\x6f\xa0\xe3\x36\xa6\x44\x92\x0e\x8c\x24\xd9\x81\xbe\x24\x1e\x4c\xe5\x5a\xde\xb1\xc0\xb0\xbd\x52\xf6\x2d\xf2\x4b\xc0\x58\x6e\x3a\x6b\x5c\x64\x10\xc6\x70\xb5\x01\xd0\xb3\x01\x87\x1b\x00\x3b\x36\xe0\x64\x0d\x69\xd9\x69\x42\x98\xad\xf9\xbe\x93\x7d\xbf\x93\xe4\x27\xdc\xfe\x02\xc9\x8d\x24\xaa\xde\x05\x1c\x49\xc2\x39\x7c\x90\xe4\x0b\x3c\x97\x64\xc2\xe1\x7a\xf5\x80\xb4\x1c\x67\x01\x4f\xe5\x7a\x8b\xe4\x0b\xe0\xf0\xcc\xa4\xb0\x85\x57\x6b\xe1\x3e\xa3\x19\xfa\x13\x4c\x0e\x1a\x0c\xa7\x92\x6c\x9c\xb4\x5b\xb2\x9e\x21\x9a\xe9\x54\xbd\x66\xc2\xea\x2b\x04\xf2\x6c\xd1\xf6\x04\x85\xef\x92\xbc\x86\xf3\x5f\x20\xef\xe4\x59\xb9\x0b\x91\x3c\xab\x81\x43\x4a\x44\xbb\xa3\x6b\x08\xa5\x2e\xd7\xe5\x84\x81\x20\x29\x48\x42\x55\x05\x5f\xe4\xfa\x95\x7b\x6e\x56\xce\xb1\x6a\xe0\xd9\x06\xb8\xef\x0a\x6e\x86\xbe\x48\xf0\xe0\x4f\x81\x04\x6e\x77\xb0\x49\x23\x7e\x5c\x1d\x82\x3c\xb8\xd7\x52\xa9\x3d\x2b\x59\x78\x25\xb7\xa8\xe8\x09\xbf\xe3\xed\xec\xfd\x81\x44\x5b\x7f\xc0\xad\x4a\xc1\x0e\x6e\xeb\x44\x94\xad\x83\xfd\xfd\xdd\x83\x05\x9c\xac\x59\xc9\xa7\x12\x42\x99\x2d\xac\x37\x7f\x87\x1c\x6d\xda\xac\xd3\x74\x8a\xb8\x12\xd1\x78\xb6\xdf\x77\x30\xf6\xcd\xab\x16\xbf\xe8\x94\xef\x77\x30\xd6\xe2\x0e\x7c\x5e\xd7\x6d\xce\x5f\xfc\x2f\x8e\x9c\xd6\xb5\x44\xbc\xd5\xc1\x2d\xe4\xe0\x86\xd3\x7a\x27\xd0\x33\x99\x25\x49\x81\x67\x2b\x5b\x34\x43\xaf\x55\x77\x1f\xe3\x05\xbc\x5e\x46\xce\xfd\x6c\x5e\xac\xc8\x6e\x64\x1f\x81\xe2\x7a\xee\x2d\x59\x61\xdf\xe4\xb9\x3e\x3b\x26\x51\x44\x19\x56\x5b\xa4\x92\x00\x6e\xdd\x2e\x82\x5e\x08\xa4\x36\x00\x6a\x76\x2a\xa1\x76\xca\x19\x3a\x92\x20\xa4\xea\x70\xa4\xa4\x26\x25\x6b\xeb\xe9\x97\xf6\x1c\xd7\x69\x49\xdf\xb9\xf8\x87\xce
\x84\xf6\x8f\x4b\xc7\xcc\x78\xaa\x26\x64\x71\x44\xa4\x51\x38\xe2\xf3\x68\x83\x00\x42\xe2\x5c\x98\xbe\x72\x29\x6e\x39\x97\x8e\x8d\x37\xdc\x84\x65\xc7\xcf\x4f\xe9\x50\x7d\x58\xc4\x0d\xf4\x92\x89\xdd\x20\x3f\x15\x16\x11\x24\xdc\xa0\xe7\x9c\x5f\xb1\xc6\xeb\x24\xe6\xee\x33\x16\xc6\x7d\xe6\xc6\x9c\x9d\x0e\x1a\x54\x36\xae\x93\x98\x3b\x2d\x23\x7e\x38\x70\xa2\xc7\xc7\x77\x96\x40\x1d\xdc\x72\x1a\x03\x1a\x8d\x74\xbe\xb7\x86\xbc\x62\x8d\x41\x3c\x1a\xc5\x33\x93\xad\xea\x5a\xa2\x3f\x05\x8a\xb1\x82\x9a\xd1\xbb\xc4\x77\xba\x35\xc1\x46\x09\x33\xba\x41\x11\xcc\xd0\x99\x84\xcf\x52\x47\xdf\x2c\x38\xa1\x84\x91\x58\x5f\xec\x92\x12\x61\x35\x31\x93\x42\x9d\x33\xca\x1b\x11\x97\x71\x83\xae\x68\x81\x4e\x9b\xc7\xe3\xc6\x24\x4e\x92\x28\x88\x46\x91\x8c\x58\xe2\xb4\x4c\xa3\xd7\xb7\x6f\xcb\x29\xdd\xbf\xa1\x1e\xf8\x58\x27\xb2\xcf\x06\x3e\x22\xba\xfc\x3b\x11\x07\x23\x36\x36\x95\xa8\x26\x6b\x27\xeb\x3a\xac\x2d\xc7\x57\xcd\xd4\x12\x9c\xbf\x5c\x76\x18\x4d\x19\x37\x18\x34\x9c\x83\x5b\xe8\x9d\x40\x33\xf4\x5c\xc2\x1e\xe8\xae\xcb\x5e\x87\x8a\x75\x7d\x94\x1b\x0e\x16\xdb\x9b\x16\x84\xbe\x84\xbe\x3e\x84\x0d\x3f\xf5\x61\xef\x97\x92\x7c\x82\xb7\x6b\x59\xda\x13\x24\xf0\x83\x27\x46\x89\xfc\x24\xc9\x4b\x89\x66\xe8\xad\x84\x1d\xd8\xdd\xc1\x18\xbe\x49\x72\x8b\x3e\x2a\x66\xf7\x49\xc2\x4f\xf5\x1f\x86\x1f\x92\x44\xf0\x64\xfd\x46\xad\x8f\x55\x43\xb4\x56\x18\x37\x75\xbd\x97\xe4\x06\xde\x49\xf2\x0d\xfe\x5c\xbd\x77\xe5\x99\x6a\xe1\xc5\xda\xaa\xf2\x4b\x1c\x7a\x3a\x31\x38\x7c\x95\xe4\x25\x70\x46\x12\x10\x6c\xb5\xa1\xba\x9b\x2b\xec\x33\xc4\x19\xec\xee\x00\x37\x76\xac\x00\xa4\x59\x5f\x59\x57\x4a\x97\x2e\xf2\xec\x7a\xac\xdc\xcd\x4e\x34\xcb\x30\xbb\x8a\x56\xd2\xe5\xaf\xea\x79\x29\x91\x78\xb0\xbb\x53\xf0\x99\xc2\xee\x5d\x12\xe0\xd2\x2e\x27\x33\x24\x98\xe6\x74\x39\x66\xb6\x8c\x59\x27\x24\x64\x39\xe6\xdd\x9d\x3f\x84\xcb\x80\x91\x77\xd9\x88\xed\xee\x80\x6c\x77\x30\x06\x4e\x78\x4f\xcd\x41\x77\x88\x7d\xe1\x0e\x41\x61\x97\x0a\xbb\x2a\xdd\x2d\xce\x86\x7f\x94\xf0\xa7\x82\x1a\xe0\x96\x84\x19\x7a\xa1\x43\xe7\xff\xf8\x24\xb1\x86\x2c\x33\xf6\xd9\x90\xd9\x2c\xd0\x9f\x31\xa4\x8c\x3c\x07\xca\x36\x78\x62\xb2\x4d\x5b\x1c\x7a\x65\xbb\x19\x83\xad\x0e\xdc\x0f\x7d\x06\xcc\x97\x0f\x76\x77\xe6\x1e\x0c\xfc\x34\x4f\x0d\x4c\x32\xc9\xea\x0e\xfd\xd0\x6d\xd2\x06\x98\xae\x12\x2d\x05\x11\x6d\xd5\x48\x22\x81\x99\xd1\xa2\xc0\x30\xa4\x24\xd5\x27\x96\x57\x76\x98\x77\xc8\x8b\x53\xa4\xff\xbe\xbb\x03\x8c\x68\xbc\x12\x78\x5b\x5a\xfe\xbd\x29\xa2\x4c\xd5\xd4\x96\xaa\x06\x0e\xc7\x50\x86\x7b\x7c\x93\x0b\x0c\x31\x5b\x31\x47\xb7\xb8\xbb\xbd\x80\x88\x91\x01\x87\x01\x23\x01\x87\x80\x6d\x16\xe0\x92\xea\xf7\x15\x1b\x57\x3e\xb3\x6b\x01\x5a\x9d\x5a\x60\xd6\x4e\xc1\xaf\xf2\xcb\xcd\x16\x0b\x18\x31\x92\x4a\xe8\x33\xb2\xf9\x9c\xff\x3d\xdd\xf7\x29\x04\x8a\x59\x50\xe9\x33\x08\x12\x5f\x42\x30\xf3\x39\x84\x67\xbe\xee\xcb\x29\x23\x82\xc3\x36\xdb\xbc\x2e\xc7\x8c\x7c\x85\x2b\xb6\x96\x0f\x1c\x76\x7a\xc2\xbf\x43\x63\x35\xf5\xb6\xcd\x29\x45\x35\x6b\x86\x8c\xa4\x1c\x26\x1b\xcb\x39\x8e\x29\xe8\x41\x76\x47\xcb\x8c\xad\x39\x3d\xe5\xe9\x30\x99\x8a\x54\x03\x8c\xec\xed\xea\xb3\x0a\x7b\xfb\x84\xc8\x5e\xc7\xf7\x20\x25\xac\x9b\x96\x91\x4d\xad\x56\x19\x7e\x5b\x4b\xfd\xae\x63\x22\x33\x2f\x55\x71\x98\xeb\x46\x76\x05\xe9\x78\x7f\x88\x16\xb5\xbc\x31\x29\x21\xac\x77\x23\xfd\x5b\x89\x4c\x4d\x6d\xe1\x0b\xbc\x80\xbb\x75\x4b\x42\xe1\x56\x95\xcc\xe7\x33\x34\x65\xe0\xfc\x5f\x0e\xa4\xd8\xaa\xc3\x50\x34\x43\x43\x06\x8e\xaf\xbe\x69\x6a\xb2\x6d\x9c\xea\x6d\xbc\x0a\x1c\x12\xaa\x8f\x7d\xce\x18\x9a\xa1\x2b\x06\x61\xab\xa3\x13\x01\x65\xdc\x86\x96\x9a\xe3\x8d\xec\x52\x52\xe4\xc6\xba\x95\x68\x80\xfa\x4c\x8b\xb2\x13\x66\x82\x89\x69\x7e\xb8\x65\xb1\x0
4\x94\xc2\x8d\x2c\xbe\x62\xb8\x65\x2b\x37\x23\xd3\x3a\x56\x6f\x50\x9a\x35\xe8\x81\x03\xc6\x00\x95\xaa\x06\xa5\x24\x2d\x53\x75\x4d\xd1\x9d\xa1\xe5\x4a\x4d\x54\x86\x35\x46\x4d\x59\x6a\x13\x94\x81\x29\x4c\x79\x9e\x0c\xb8\x61\x2b\xf4\x05\x43\x89\xac\x53\xc2\x32\x4a\x7a\x0e\x18\xc9\x8f\x99\x0c\xa6\xac\xa4\xe4\x16\xdd\xaa\x2a\x6e\xa5\xe9\x4f\xd6\xea\x80\xc4\x8a\x1e\x4d\x0d\xd3\xf6\xda\x2a\xa8\xe9\x19\x45\xca\xd1\x4a\x0e\x44\xa5\x95\x7a\x36\x23\x44\x66\x84\xfc\x5b\x9e\x5f\x57\x69\x4c\xf7\x92\xc8\x92\x90\x3b\x74\x63\x13\x22\x5b\x1d\x9d\x67\x4d\x93\x21\x6d\x4f\x47\x06\x78\x93\x5d\x77\xf7\x81\x11\xc9\xe1\x39\x5b\x2d\x4e\x7f\x60\xa0\x2f\xf0\xf2\x1f\x3c\x70\x80\xe3\xde\x0c\x1d\xa9\x45\xa6\xab\x78\x08\x1c\x63\xbf\x84\x49\x2a\x40\x1d\x03\xf4\x48\x03\x29\x69\xff\x7a\x79\x45\x76\xbb\xd8\x5b\xc0\x53\x46\x6e\x38\x7c\x25\x4f\x99\x5a\x8b\xaf\xd6\xce\x15\x56\xa6\x9e\x61\x7a\x12\x33\x73\xf8\x84\xe6\xef\xcd\xe4\x8e\x09\x35\xef\x63\x33\x58\xb1\x86\x8d\x2d\x58\x03\x61\xa5\x63\x49\x61\xa6\x03\xce\x66\x3a\x9e\x4b\xdf\xd2\xe0\x52\xd8\xf7\xbc\x43\xd9\xd3\x4a\x92\x22\xe3\x44\x2a\x01\xd5\xbf\x45\xaf\x98\xb1\x99\xb4\x3a\x5a\x04\x2d\x7b\x76\x19\x95\x58\xf3\x55\xd4\x5f\x57\x6f\x64\x38\x65\xeb\x1d\xed\x45\xfd\x9e\x99\x44\xdf\x2b\x93\xc8\xbe\x43\xe6\x0e\x9d\x32\x58\xab\xc8\x87\x12\xcc\xed\x62\x85\x69\x05\xc3\x39\x23\xcf\x39\x7c\x61\xeb\x42\x46\x66\xe8\x9c\xad\xf2\xaf\x3d\x65\x48\xe8\x98\x1b\x83\xe6\xac\x42\xbe\xd4\xa1\x67\xab\x51\x88\xdf\x41\x3d\xcb\x2e\xfd\xc5\x0b\xb5\x82\x4d\x15\xc7\x8c\x7c\xe6\x70\xb2\x76\x4f\x38\xe5\x48\x63\x3c\x66\xa6\x8d\xd8\xdc\x5f\xb0\xc1\xce\xf4\x65\x65\xf5\xde\x02\x90\x20\x33\xf4\x9d\xc1\x49\x86\x0a\x74\xbf\x9e\x31\x14\x4a\x0c\x4f\x19\x3a\xc6\x90\xe9\xa4\x72\x43\x05\x7a\x72\x2f\x30\x7c\xe6\xeb\x6d\x2b\x5f\x58\xb6\x7b\x75\xdf\x70\xf7\x9c\x26\x37\xe4\x3e\xf0\xbf\x42\xe8\xa7\x1c\xfa\xbe\xe4\xc0\xfc\xcf\x1c\x06\xc5\xa1\xe4\x32\xa2\x88\x59\xc2\x03\xfd\xd3\xdf\xea\x40\x10\xf9\x8e\x03\xc1\x75\x25\x8d\xea\xe7\x0a\x5c\xdf\x1f\xc7\x10\x7c\xa9\x40\x3c\xab\x40\x7c\xf0\x3f\x33\xe4\x38\x18\xe8\xc7\xfc\x69\xea\xdf\xd3\xc8\xbf\x91\x40\xaf\xd5\xdf\x70\xea\x73\xa0\xb1\x7e\xf1\x43\xff\xbd\xd3\x7f\x7f\xea\x25\x9f\x14\xe5\x3f\xe7\x4f\x41\xa8\xbe\x07\xa7\xf9\xef\xd7\xfe\x56\xc7\x4e\x0b\x68\xd5\x8f\x66\x68\x16\x6b\xe6\xad\x79\xca\x24\x86\x0e\x70\xec\x73\xdc\x72\x1e\xd0\x49\xf4\x60\xba\x63\xdd\x3b\xf8\x91\x6d\xcc\x2f\xfb\xb3\xfa\x79\xbf\xf6\xf9\x25\x5b\x65\x63\x2b\xd3\xd6\x55\x3f\xef\xd6\x3e\x7f\x62\x1b\x93\xd7\x7e\xab\x7e\xde\xab\x7d\xfe\xb1\xb9\xee\x27\x9b\xeb\x7e\xbf\x19\xf9\xbb\xcd\xed\xfe\x73\x33\xe5\x2f\x36\x77\xea\xd7\xcd\x94\xf3\x74\x63\x69\x91\x6e\xa4\x5c\xa6\x1b\x29\x67\xe9\xc6\x6e\x49\xab\x9f\x77\xea\x2d\xa3\x9b\x8b\x87\xa9\xed\x28\xd7\x1a\x86\x69\x1d\xac\xdc\x25\x5f\xc6\x4a\x43\x99\xa1\x77\x71\x66\xf3\x34\x16\x21\xeb\xca\xe5\x74\xf3\xad\xc6\x77\xe8\xad\x46\x61\x9c\xc7\x65\x02\xa3\xb4\x92\xc9\xf4\x5b\x5c\x77\x4a\x0e\xd1\xb6\xe6\x4a\x12\xe7\x06\xbb\x19\xb3\x1c\xd2\xc2\xdd\xee\x85\x29\xfa\xc7\x2b\x6e\x6e\xac\x88\xb8\x64\x43\x26\xfc\x86\xf3\x8f\x16\x6f\xfd\xc3\xf9\x07\xf6\xd3\x48\x07\x1b\xe4\xbb\x4f\x98\xa2\x2c\x8d\x38\xeb\x1b\xbb\x8c\xc4\x2d\xe4\x34\xfa\xd1\x30\x92\x09\xe8\xfc\xfc\xc3\x58\x9a\x4f\xba\x6e\xac\x33\x21\x46\x88\x47\x88\x6b\x77\x64\x99\x99\x24\xb5\x35\x1f\xeb\x6a\xe1\x14\xf1\xf2\x94\x2d\xd5\xc7\x2e\x02\x08\xc9\x96\x16\x75\xbb\x2b\x7a\xc9\x34\x6d\x8a\xc6\x11\xa4\xc0\x75\x34\x41\xf0\x1c\xb8\x4b\xcf\xb4\xcf\x44\x3b\x32\xa9\xbe\x49\xca\xdc\x16\x19\x16\xa7\xdf\x90\x84\x76\x47\x71\x8f\xca\x10\x51\x8c\x7d\xdd\xe5\x21\x78\x70\x4f\xcf\xb4\x15\xa4\xcc\x8b\x10\x28\xf5\xe6\xb9\xcf\xd4\x88\xbb\x74\x61\x8f\x48\x62\x
[binary payload elided: ~18 lines of hex-escaped compressed bytes, likely a gzipped protobuf file descriptor embedded in a generated Go source file from the vendored diff; the raw bytes are not human-readable and carry no reviewable content]
\x21\xdc\x5a\xa7\x80\x7e\x6e\x61\xee\xe7\xe6\x39\x6c\xc9\xcf\x2d\xa1\xa0\x4e\x89\xdd\x80\x04\xc2\x39\x46\xc4\x6c\x3e\xea\x55\x9f\x3c\x34\x68\x88\x56\x18\x5c\xb5\xef\xfe\x12\x0a\x1d\xde\x89\x42\x37\x97\x77\x0d\x5d\xa6\x97\x71\x6c\x95\x16\x56\xb0\xad\xbb\x6f\xa3\xe9\x89\x6e\x63\x57\xc9\x81\x2a\x63\x19\xe1\x86\x28\xe6\xd8\xef\x98\x69\xa2\x20\x15\x51\xe0\x39\x51\x40\x51\xbc\xaa\xdc\xcf\x1a\xfc\xe8\xd4\xe8\x86\x08\x7e\x67\x29\x48\xc8\xe0\x35\xab\xa0\xcf\x55\x2d\xca\x3d\x5a\xe4\x6d\x32\xed\xe9\xf6\x43\xac\x34\x29\xfa\x68\xc7\xc0\x4e\x6c\x09\xee\x99\xcd\xc1\xfd\xaa\x31\xc5\xa9\xb1\xf5\x48\x44\x78\x11\xc6\x2c\x5a\x95\x7a\x77\x62\x4c\x2b\xbe\xaf\x4a\xa2\x8c\xf6\x29\xdf\xd5\x67\x3e\x1a\x9f\xfd\x56\x15\xea\x1b\x5b\x80\xc9\xe7\x0b\xbb\x66\x1c\xcc\xf7\x05\x4f\xd3\x76\xfb\x14\xad\x82\x58\x6d\x91\x22\x38\xf3\x6f\x9b\xec\x51\x52\x2e\x65\x54\x33\x3b\xc9\x2d\x52\xfc\x30\x65\x6e\xf4\x4f\x25\x44\xbe\xd2\xe6\x38\x13\x8e\xd6\x1f\xdb\x82\x9c\x25\x64\x57\xc9\x28\x79\x32\x96\xdc\xf4\xe4\x40\xe8\xcc\x2d\x7a\x72\x65\xc6\x16\x97\x93\xef\x3a\x1d\xf2\xae\xb8\xc3\xf8\xe2\x27\x3b\xb0\x25\xb0\xf7\x76\x06\xec\xd0\xe6\xc0\x9e\xd9\x0c\x5c\xd4\x9f\x7d\xd1\x4b\x7a\x60\x96\xf4\x22\xf9\xc4\x45\x1a\x26\x71\xb9\xa8\x6e\x16\x46\xfe\x36\x5a\xf7\x2c\xbc\x3a\x4d\xb9\xa8\xbc\x12\x2c\xf6\x2e\xab\x69\x69\xa6\xe1\x62\x3f\xd3\x4a\xc7\x2e\xc7\xb9\x6a\x1b\x97\xad\x28\xb7\xbe\x91\xa1\x9a\xe2\x41\x6e\x5e\x13\x07\xe1\x85\x05\xa7\x62\xa1\xfd\x5e\x1c\x24\xb8\x28\xba\x52\x94\xa5\x18\x11\xee\x4a\xf5\xf9\x43\xf7\x09\x7b\xd1\x3d\xfc\x71\x72\xee\x40\xbb\xa0\xbf\x0f\x20\x86\xad\x68\x91\x2e\x0b\x45\x97\xb5\x35\xf8\x61\xb4\xea\x0a\x4c\x0b\x00\x7b\x11\x14\x0c\xf6\x15\x83\xaf\x8a\xc1\xce\x11\x84\xfb\xd1\xfe\xe9\x62\xfe\x8f\xb9\x41\x7b\x14\xce\xa3\xf6\xe8\x7c\x4f\x75\x04\xa3\xcd\xe5\xab\xca\x27\x06\x5b\x4c\xc9\x67\x17\xae\xd5\x21\x77\x37\x0c\x6b\xcf\x41\x52\x1d\x54\xaa\x8e\x15\x36\x90\x29\xf3\x32\xdb\x03\xef\x93\x7d\xca\x09\x92\x27\x56\xf2\x76\xc8\x70\x43\xe2\x4c\xc9\xeb\x08\x5e\x22\xea\x38\x37\xbd\x2a\xcc\xa0\xc9\x46\xdd\xdf\xd4\x74\xf7\x51\x75\x07\xde\xb9\xbe\x06\xd2\x48\xef\x80\x03\x5b\xb6\x49\xcd\x1b\x7e\xb2\xb7\x34\x73\x58\x48\x00\x5f\xb5\x8f\x6a\xee\xa2\x5a\x53\x20\xa0\x27\xb3\x73\x4b\xae\x22\x30\x01\xc5\x93\xb9\x1a\x62\x57\x0f\x2d\x5c\x1e\xda\x27\xfb\x03\x27\x8a\x3c\x75\xed\xb0\x1c\xd5\xa7\xea\xa8\x06\x76\xd8\x80\x40\xbd\x4f\xf6\x2b\x4e\x42\x9a\x0f\x0f\x77\xf3\x18\x73\xd4\x86\x5a\x8e\xdb\x52\xc5\x8b\xe2\x65\xd0\xdc\xd1\x77\x4e\x02\xba\x38\xcf\x5d\x4e\x7e\xa8\xb7\x8d\x2e\xee\x79\xbb\xd7\x8b\xcc\xfb\x66\xb3\x13\x7c\x5c\xb2\x61\xc3\x05\x71\xef\x30\x21\xd5\xb2\x16\xaf\x7b\x59\x89\x1c\xb1\x6c\x7c\x5c\x0e\xe6\x2a\x69\xa4\x9f\x33\x72\x68\x44\x8e\x82\x70\x06\x1a\x26\x35\xc7\xe1\x6e\xa8\xcf\x64\x39\x4c\x06\x4d\x30\x89\xd7\x73\x05\x14\x6a\x32\xe8\x6a\x10\xd4\x9d\x78\xe7\xba\x13\x03\x82\xee\xf2\x3e\x9f\xdb\x6e\x33\xd0\x3d\xb6\x5d\x75\xac\xf3\xe1\x78\x9f\x5b\x3a\xf8\x6c\x3a\x60\x0b\x9c\x5a\xaa\x81\xcd\xb4\x35\x60\x96\x2e\xb7\xee\xda\x69\x33\x74\xad\xdb\xa9\x73\x43\x2e\x8b\x2e\xbe\xe6\x73\x68\xe8\xe4\xab\xe9\xa4\x18\x42\x1d\x24\xdc\xe7\x4b\x8c\x4e\xdd\x3b\xdf\x9b\x2d\x55\x79\x5e\xaf\x12\xd5\xab\xac\xd7\x79\x58\x2f\xac\x54\x69\x91\xd2\x92\x45\xd6\xed\x24\x5a\x9d\x3b\x71\x66\x73\xf8\x61\x4b\xc8\x74\x42\xac\x0c\x5e\x6a\x8a\xf3\x39\x6a\xf6\xcf\x73\x1f\x3e\x24\x96\xa5\x79\x2f\xad\x0d\x74\xf3\x6c\x06\x7d\x06\x9f\x23\x54\x8f\x50\x7b\x67\x0e\x47\xad\x28\xf9\x46\xe6\x37\x87\x23\x1d\xea\xd6\xce\xd3\x21\x98\x40\xc4\x3b\x8b\x1f\x97\xe2\xb6\xd2\xd4\xe7\x0d\xa9\xbf\xe8\xdc\x63\x86\x6d\x28\xf2\x2c\xcd\xe7\xf0\xbe\x0d\x85\xeb\xa8\xaf\x8e\x65\x12\xd3\x49\x35\x2f\xa9\x03\x03\x3e\x9
0\x79\x54\xc0\xd8\x51\x8f\x0c\xa4\xb3\x13\xe1\x15\x73\x7e\x37\x57\x49\x73\x28\x5e\x48\xf4\x5f\x8f\x55\x9d\x78\xe1\xf2\x4d\x1b\x5f\xe8\x68\xbf\x70\x14\xe9\xf0\x28\xd5\x0c\x02\x14\xde\xb5\xae\x51\x4b\x7c\xf6\x07\x71\xe9\x40\x6f\x32\x49\x2d\xe6\x08\x10\xfd\x2c\x8f\xd0\x48\xcb\x35\xc3\xab\x8d\x17\x79\xcc\x32\xde\x67\x0f\x1f\x2e\x46\x71\x2b\xea\xc4\x8e\x9c\xcf\x49\x4c\xa6\xe4\x24\x82\x9d\x25\x07\xc9\x3d\x4d\xa3\xcb\x89\xea\x1d\x7f\xf8\x50\x47\x29\xeb\xb3\x51\xdc\x77\xed\x98\xce\x95\x2c\x29\x29\xa8\x75\x20\x98\x33\xfb\x88\xf6\xbb\xa3\x28\xb1\x31\xf0\xc0\xfb\x08\xa2\xc4\x84\xde\x7d\x68\x69\x54\xa9\x76\xff\x31\x64\x52\xcb\xd6\x67\xd1\x8a\x74\x0f\x39\xdb\x74\x61\x4b\x38\x57\x80\x7b\x63\x73\x38\xb2\x3d\x38\xb6\x19\x9c\xd8\x19\xec\x6a\x20\xde\x8e\xfe\x42\x1e\x34\x49\x8a\x38\xbd\x22\x30\x4a\x66\xcc\xe5\xbf\x42\xcf\x71\x12\x81\xe8\xbf\x04\xd1\xcf\x40\xf4\x7f\x80\xe8\xcf\x20\xbf\x47\xc2\xb4\x84\xd1\x42\xa6\xf6\x22\x4f\x7b\x7b\xfe\xf1\xb8\xff\x12\xb7\x32\x43\x03\xfa\x1f\x68\x3f\x3f\x7b\x51\x95\x99\xdf\xe2\x82\x7f\xa6\xe0\xe9\x3d\x2a\xb9\x49\x3d\xdc\xe6\xd4\x8e\x33\xb2\x1d\xa1\x27\xcd\x5e\x02\x3b\x19\xec\x86\x44\x5f\xbc\x31\x98\x84\xc4\x0a\x58\x94\x72\x4b\xeb\xd4\xe1\x34\x5a\x11\x3d\xa3\x31\xbd\x7c\xf1\x8d\x77\x89\xa2\x27\x78\x1c\x57\xde\x24\xcf\xc8\xa9\x1a\x0d\x91\x8e\x68\xbc\xbe\xfc\x51\x89\xea\x89\x93\x54\xcb\xa2\x16\x45\x40\xdc\x9f\x01\xc1\x6b\xd3\xcf\x94\xa0\x65\x0f\xfd\x86\x16\x26\xb9\x23\xf9\x6e\x65\xfe\xde\x8a\x44\x73\x8d\x2b\xad\x1d\x15\x3e\x1b\x94\xa0\x31\x00\x66\xd6\x2b\xa9\xc2\x05\x89\xc1\xa3\xa3\x7c\x64\x45\x22\x97\x62\x1b\xbe\x29\xec\x57\x44\xed\x9c\xc3\xc1\x8a\xfd\x68\x4c\x9f\xe2\x1a\xbc\x9a\xa7\x42\x44\x59\x75\xae\xc4\xbb\x19\xd9\x82\x33\xf5\xcd\xef\xe2\x8e\x7f\x76\x05\xc9\x03\xaa\x53\x38\x28\x22\x33\x53\xfc\xa1\x03\x30\xe3\x25\xcb\x76\x04\x4b\x21\x98\x73\x20\x99\x60\x22\x50\xaa\xdb\x14\x81\x9c\xa9\x02\xb1\x4a\x38\x69\xf3\xbb\x8c\x1d\x6d\x5e\x18\x7f\x01\x23\xe6\xa9\xf1\x28\x89\x2d\x8c\x2f\xd4\x88\x6a\x09\x06\x93\x7f\x76\x9f\xf4\xb9\xd2\x78\x18\xb3\xae\x3a\x09\xf1\x0c\x66\x2f\x50\xf5\x8e\x46\xd5\x75\xd8\x32\x11\x5b\x05\x85\x58\xef\x20\x91\x08\x53\xe5\x16\x2a\x69\x70\xa6\xc4\x99\xe3\x93\xa3\xbd\xf7\x6f\x2c\xb8\x91\x28\xa2\x23\x18\xef\x96\x8b\xa1\x66\x86\xef\xae\xf0\x2d\xf7\x43\x7d\x6d\xbd\x2d\x16\x53\xa6\xc6\xa5\xb1\x68\x0c\x99\x23\x57\x9e\x29\x01\xe8\x34\x94\xe5\xa7\x2a\x5c\xe1\xb7\x9f\xdf\x94\x09\x0a\xee\x95\x7d\xab\xf6\xf1\x4b\x40\xa6\x01\x85\x71\x40\x76\xa8\x49\xef\x18\xb6\x0c\xdc\x80\x4d\xcc\x67\x16\x85\x5b\xb2\x2d\x20\xac\x82\x8e\x7a\xce\x7d\x2a\x34\x54\x20\x3c\x36\x0c\xda\x25\x67\x0a\xb8\xaf\x25\x3c\x18\xaa\x7f\xf3\xff\x63\x4e\x26\xf3\x95\xca\x97\xab\xe0\xa9\x97\x7a\x79\x74\x57\x78\xb0\xf1\xa5\x16\x0c\x29\xfc\x88\x1c\x22\x13\x07\x15\x5e\x5b\xf0\x91\xc3\x81\x9a\xc4\x16\x6c\x25\x68\x1e\xa1\x1e\xaf\x12\x38\x30\x8f\xaf\x38\x84\xfa\xe9\x03\x57\xbd\xa9\xa7\xef\x1c\x7e\x98\xe2\x53\x0e\xa7\xe6\x51\x47\x31\x6c\x98\x96\xe6\x55\xe6\xf4\x1b\x6d\xb2\x8d\x40\xec\xd7\xc4\x32\xa1\xf9\x8b\x44\x89\x72\x37\x72\x5e\x09\x38\x88\x4c\xa4\xc4\x4f\xda\x40\x61\x63\x0e\x5f\xf5\xd3\xfa\x1c\xbe\xeb\xa7\x27\x73\x78\x15\xdd\x27\x1b\xd4\x56\x8b\x1d\xf2\x18\xb8\xb3\xc7\x74\x14\xd9\x19\x79\x15\x69\x3b\xa4\x30\xd1\x49\x70\xad\x89\x48\xfc\x0c\x1b\x59\x90\x24\x18\x4b\x4d\x49\xa6\x23\x45\xb5\x6d\x45\xe1\xbd\xa6\x56\x17\xb5\xb8\xfe\x79\xcb\xc4\xc9\x63\x1f\x39\x8e\x6e\x97\xff\xae\xb6\x0e\x6a\xa1\x9a\x6e\x74\xeb\xd1\xd0\x1e\x40\xd8\x10\x97\x89\x57\xac\x2b\x6a\x1e\xd0\x4b\xa9\x2d\x3a\xb7\x89\x89\xc3\xa4\x3f\xbf\xf0\x61\xbf\xe6\x11\x63\x3e\x0c\x99\xf3\x96\x93\x7c\xce\x97\x52\x4e\xec\x47\x8f\xa2\xc4\x63\xd1\x65\x92\x4a\xfb\xf9\x
e0\xf9\xc6\x23\xab\xaa\x71\x88\xe0\x50\xbb\x72\x8f\x9d\x9f\xfa\xa6\x6d\x07\xef\xcd\x64\xa9\x8d\x76\x13\x70\x4f\xec\x61\x05\x7e\xba\xcb\xb8\x6d\xba\xfc\xca\x5f\x7e\x15\x2d\xbf\x4a\x97\x5f\xb9\xcb\xaf\x82\xe5\x57\xe1\xf2\xab\x06\x9c\xdb\x40\x2e\xd9\xf2\xab\x6c\xf9\x55\x43\x42\xd7\x06\x56\x4b\xac\xe0\xbe\x94\xb8\xe9\x83\xbb\x69\x87\xe0\x3e\xb5\x03\x70\x9f\xdb\x1e\x78\xa1\xcd\xc1\x8b\xec\x0c\xbc\xc4\x96\xe0\x65\x76\x0a\xde\xcc\x66\x78\x57\x09\xde\x1b\x3b\x01\xef\x93\x1d\x81\xf7\xd9\x8e\xc1\xfb\x6a\x4f\xc1\x3b\xb7\xc7\xe0\x75\xed\x2e\xb8\x1f\x6d\x77\x5e\xff\xdf\xe2\x65\x69\xbe\x73\x0f\x86\xe0\xee\xe3\x8d\x1c\x99\x91\xcb\x04\xe3\xf5\xa9\xc7\xae\x7a\xf4\x28\x25\x9c\x92\x90\x92\x8b\x84\x6a\xa6\x96\x64\x94\x30\x4a\xbc\xa4\xfc\x2f\xa6\xc4\xa3\x44\x52\xf2\xd3\x9b\xda\xc9\x5c\xb3\xb0\xaf\x84\xf3\x36\x56\x20\xf2\xb1\xf5\x24\x97\x62\x82\x58\x23\x96\x6d\xad\x5d\x15\x09\x7d\xe0\xc3\x2a\x9b\x58\x23\x3e\x49\xfb\x88\x48\x38\x22\xa5\xe8\xb4\x1f\xb5\x33\x5d\x9e\x22\x3d\x47\xae\x89\x3c\xf4\x26\x72\xf6\x23\x62\x79\x11\x4b\xd3\xf7\x6c\xcc\x2d\x0a\x5f\xa2\xdc\xf1\xca\xbd\x52\x22\x5c\xec\x3b\x6f\x62\x62\xf9\xe1\xd4\xa2\x20\xf4\x8f\x74\xc2\x62\x8b\x82\xf4\x9d\xfd\x18\xb8\xef\xcc\x48\xec\xc3\x0e\x20\x1a\x16\xe6\x49\xfa\xc4\x7a\x97\x30\x3f\x8c\x2f\xfa\xfd\xbe\x45\xbf\xe9\x90\x35\x99\xef\xc4\x02\x98\xdf\x12\xf9\x25\x39\x9d\x4c\xb8\xd8\x62\x29\x27\x74\x0e\x9e\xff\x17\xee\xbd\xf4\xa5\x57\xae\x21\x58\xbc\x28\xe0\x7e\x1d\x57\xbc\x67\xc5\x4d\x57\xa2\x27\xe5\x66\x52\x26\x6a\x5a\xa1\x7f\xcf\x60\x49\x81\x6e\x18\x5a\x14\x5c\xdf\xc9\x04\xa4\x7e\xfb\xc2\xbb\x3e\xc4\x60\x7a\x10\x68\x53\x16\xf9\x0e\x13\xe0\xfb\x0d\x8a\xd0\x78\x64\x05\xac\x37\x0e\xe3\x2c\xb5\x6c\xf5\x38\x89\xb2\xd4\xaa\xe4\xcd\xf4\xd5\x12\x9f\x21\x26\x7a\x13\x11\xcb\x95\x71\xc7\x95\x71\x2f\xc9\x64\x14\xc6\xbc\x17\xc6\x41\xd2\x71\x13\xe1\x73\xd1\x1b\x74\xc6\xa2\x37\xec\x8c\xdd\xde\x10\x49\x7c\xe4\x83\x35\x66\xe2\x22\x8c\x7b\x11\x0f\xa4\x05\x56\x6f\x43\xf0\xb1\xda\x21\xbd\x83\x01\x76\xae\xba\x0d\x18\xaa\xc3\xbf\x51\xd8\x31\x66\x7b\x53\x1f\x01\x46\x86\x32\x52\xc0\xd2\xd5\x2b\x90\x45\x16\x85\xb1\x7e\x66\x16\x85\x4b\x5f\xdb\xf4\xb5\x2e\xc6\x31\xcf\x83\xce\xb5\x56\xf9\x1e\x98\x2a\xe4\x8c\xfc\x99\x63\x66\x0b\xf0\x29\x55\x8f\xdf\x14\xa7\x70\xe1\xaf\x4c\x86\x13\x57\x93\x34\x98\x40\x1e\xda\x1f\x19\x8c\x75\x88\x73\xe9\x93\x0c\xf3\x78\x8c\xf4\x81\x62\xfd\xee\x88\xa0\x6f\x46\x91\x46\x2d\x76\x4c\x4c\xff\x89\x24\x18\x4e\xd3\x93\xd0\x95\x44\xae\x59\x1d\x8b\x02\xaa\x90\x49\x4b\x1d\xac\x21\x9a\x6a\x08\x8a\x19\x3d\xaa\x5f\xad\x7d\xad\x2b\x49\x3e\x10\x9d\x0d\x2a\xc3\x8f\x55\x7a\xd2\x1f\x58\x33\xbd\x2d\xbc\xd7\xef\x30\x5a\x7a\xe3\x59\x3b\x23\x79\x9c\xa5\x7e\x3a\x89\x42\x49\x1e\xfd\x23\x5d\x7b\x74\x81\x89\x62\xcd\x1e\x33\x71\xc1\xa5\x62\x03\xf5\xc6\x4a\xdf\xa2\x70\x63\x9e\x2f\x2d\x0a\xd7\xe6\x59\xf1\x85\x2f\xfd\xf6\x6b\xf0\x18\xdd\x2f\xfa\x6c\x40\xe9\xa8\x02\xba\xd7\xf2\x3e\xb0\x9b\x8b\x10\x8d\xc0\x99\x9f\x11\x05\xe1\xeb\x96\x86\x53\x50\x88\x07\x6f\x20\x14\xc8\xda\xf9\x07\xef\xfc\x92\x45\xe1\x8c\x91\x62\xa4\x6d\xdf\x53\x07\xb1\xfd\x73\x73\x0a\xc7\x7a\x55\x22\x85\x15\x5e\x2f\xac\x0a\x87\x42\xa0\x69\x4f\x8a\x20\xaa\x49\x11\x14\xd7\x3f\x3a\xcc\x73\xdd\x62\xb6\x33\xfb\x10\xe3\x68\x18\x81\xb3\x92\x06\x05\x75\x2a\x02\x0e\xc9\x0e\x68\x2e\x9f\xc2\xd5\xca\xc3\x51\x4f\xbf\x18\x17\x5e\x49\xfe\x0b\xc5\x42\x5d\xf9\xda\x3b\xa0\xcf\x69\x9e\x26\x6a\xcb\xec\x38\x73\xf1\xf4\xef\xf9\x8e\x60\x70\xe8\xb7\xb9\xc1\x41\xec\x1c\x11\xc2\x9d\x19\x79\xed\x9b\x00\xa6\x12\x3e\x4a\x52\xc9\xf1\x42\xab\xb1\xc9\x5b\x71\xc1\xab\x02\x17\xc4\x14\x03\x94\x07\x98\xaa\xe2\x52\x89\x20\xe8\x80\xec\x56\x58\xb4\x63\x5f\xef\x9c\xc2\
x73\x93\x24\x0d\x35\x83\x8b\x32\x43\xe8\x59\x1a\xe6\x58\x14\x5e\xc4\xbd\x50\xf2\x71\xda\x43\x87\xef\x4e\x14\xa6\xb2\xa7\xa3\xd8\xab\xd7\x25\x00\x4e\x14\x02\x75\x7b\x9b\x25\x08\xc6\x05\x48\xcc\x7a\xc3\x01\x96\xae\x77\xfc\x5e\x10\xf1\x9b\xce\x52\xc7\x79\xb3\xef\x4a\xa4\x84\xc1\x1f\x5f\xd1\x9e\xf0\xad\x3a\x09\x99\xdf\x28\x2b\x64\xe4\x67\xd7\x7e\x86\x79\xc8\x50\x58\x7a\xe9\xeb\xb8\x27\xb6\x82\x34\x8b\x02\xc1\x60\x99\x4f\xd0\x60\x11\xdf\xd8\x3f\x98\xb6\xe1\xff\x84\xdf\xd8\xc9\xd4\xda\x7c\xec\xbb\x7e\xa5\xca\x8c\x8c\xef\x77\x0c\x3a\x92\xdf\x48\x7c\xd5\x7e\xe6\xf8\x2d\xef\xa5\x11\x4b\x2f\x1b\x0e\x42\xa1\x01\x50\xc4\x7e\x51\x73\x58\xdc\x65\x8c\xb4\xa8\xe7\x58\x6b\x07\x21\xf9\x94\x2d\x9b\xb0\x97\xde\xc2\x21\xaa\x94\x07\x26\x8b\x55\xa0\x38\x23\xbd\xed\x95\xb4\x79\xf7\x3e\xdf\xdb\x0c\x63\xaa\x36\x4f\x2a\x0a\xe3\xeb\xa5\xf9\xbc\x0b\xe3\x6b\x7d\xb0\x09\xa6\xa1\x82\x0b\x22\xd0\x7b\x2c\xc7\x66\x5b\x45\x2f\x78\x28\x3a\x05\x44\x0c\xb1\x1b\x9c\xd9\x29\xd3\xf0\xca\xde\xd2\x72\x17\x0d\xcb\x84\x15\x76\xd1\x23\x43\xa3\x8f\x73\xff\x0e\x57\x15\xd9\x77\x47\xed\x36\xff\xda\xe2\xb6\x5c\x9c\xb8\xba\x38\x1a\xbe\xab\xec\x40\xbe\x18\x62\x61\x0d\x11\x04\xc6\x19\x6e\xa3\x61\x18\xb2\x94\x8b\x5e\xca\x23\xee\x29\x86\x21\x8c\x43\x19\xb2\xa8\x28\xed\x8d\x93\x1f\xbd\x3b\xaa\xcc\xb8\x7b\x1d\xca\x3b\x6a\x99\xcd\xf2\x92\x48\xc9\x68\xd6\x7f\x3d\x76\xbd\x81\x5f\x60\x7f\xe9\x13\xb1\xf6\x9b\x63\xfd\xb6\x16\xaf\xfd\x66\xfd\x86\x1b\x72\x17\x7e\xd7\x68\xfd\x1d\x23\x87\x44\xab\x73\x61\xea\x13\x6b\x07\x01\xb0\xe3\xde\x76\xe4\x65\x98\x76\x22\xe6\xf2\xa8\xf2\x15\x6b\x2d\xe7\x57\xe7\x20\xa9\xdd\xb0\x44\xea\x33\x29\xf7\x92\xd8\x67\xe2\x76\x79\x45\x55\x1f\xef\x13\xd9\xc1\x05\x37\xa7\xe1\x9b\x92\xae\xd9\xaf\x5f\x18\x75\x39\x43\xaf\x2b\x67\x35\x12\x78\x5e\x22\x01\xdf\xd7\x3e\x50\x90\x39\x98\x00\x0d\x32\x67\x88\x51\xef\xaf\x14\xa9\x5f\xb3\x3a\x78\xb2\x2c\x7b\xe1\x67\x6a\x41\xe6\xd4\x06\x3f\x8e\xf4\x50\x07\xc5\x9a\xcf\x2e\x43\xc9\x7b\xe9\x84\x79\xdc\x02\x2b\x4e\x66\x82\x4d\x2a\xf3\xc8\xf4\xd8\x17\x40\x6a\xa7\x8e\x08\xc7\x6e\x6f\xc3\x00\xbc\x27\x41\xc0\x11\xf1\x30\x61\x1c\xb0\xd1\x8c\x74\x8b\x6a\x25\x92\x35\x43\xc8\x8f\xc8\x8c\x1c\xfa\x98\x8d\x1e\x19\x1c\x7d\x44\xf4\x79\x38\xf1\xdb\x35\xef\x98\xf2\x55\x1d\x91\x34\x81\x54\x9b\x42\xfa\x55\x9b\x19\x7d\x72\xb4\xb3\x51\x6c\x32\x55\xc5\xd5\x1b\x8f\xea\x14\x26\x51\xef\xb1\x19\xd0\x91\x6c\x23\x45\x01\x39\xf7\xf1\x40\x06\x78\x57\x72\x89\x17\x16\x7b\x98\x87\x7e\x8e\xfa\x84\xaa\xf5\xfb\x1e\xc4\x0e\x62\x2c\xa6\x6a\xbe\x58\xec\x62\x46\x4e\x7c\x8c\xee\x84\x13\x18\xa8\x0e\x4a\xc9\xc4\x7a\x9f\xe8\x1d\xd4\x00\x94\x76\x82\x24\x8b\x7d\xb4\xc8\x66\xe2\x0e\xd9\xee\x63\x60\x64\xbb\x23\x25\x5e\x10\xcb\xbb\xe4\xde\x35\x9e\xe4\x1d\x23\xae\xc4\x93\x4c\xb1\x75\xef\x0d\xa3\xa2\x61\x1f\xde\xf9\xa5\x59\xa6\x61\xfd\xa0\x68\xfc\x8d\xa2\x9e\xe7\xcc\x70\x86\xb7\x13\x45\xfe\xb7\xfd\x76\xe1\x34\x27\xc3\x6a\x65\x63\x36\x45\x8a\x58\xa2\x9b\xf7\x25\xe0\x48\x05\x8e\x0a\x26\xb5\x06\xba\xe7\x25\xb1\x14\x49\x54\xfc\x54\x03\x70\x93\x9b\xb2\xed\x8e\xe6\x1f\x7d\x33\x33\x2c\x43\x9a\xbe\xd8\x41\x2f\x9f\xe6\x91\x5f\xa6\x39\xa4\x14\x5e\x31\x7d\xeb\x21\x21\xce\xa8\xc1\xf4\xd5\xf3\xb1\xd4\x8b\x1f\x7a\xa8\x31\xba\xbb\xae\xcf\x53\x4f\x84\x13\xe4\x37\xca\xf3\x13\x1b\x4c\xa2\xc1\xf9\xad\xbf\xda\x81\xb1\x7d\xd5\x4c\xc6\x9f\xea\xf7\x55\x15\x24\x5b\x15\x26\x99\x97\x2c\xaf\x41\xa9\xcc\xbb\x56\x50\x14\xfb\x16\x58\x52\xb0\x38\x9d\x30\x81\xfa\x5b\x73\xfe\x83\x24\xd6\xa8\xf8\x92\x8b\xb0\x7c\xed\x65\x22\x45\x24\x3c\x49\xc2\x58\x2b\x7f\x75\x81\xc1\xae\x88\x2b\x62\x6e\x16\x3f\x1f\x8a\x46\xb7\x78\xfd\x83\x83\xd1\xb3\x3e\xf5\xef\x19\xd8\xf7\x87\x86\xd9\xc2\x76\x8b\xc2
\xae\xef\xfc\xc6\xe3\xa9\x53\xd5\x56\xfe\x06\x07\x1a\x10\x43\x55\xe3\x93\xef\x3c\x83\xaf\xbe\x33\xdc\x80\xef\x0a\x86\x0f\xa5\xe6\x26\x6f\x24\x4c\x25\xfa\x8b\xc3\xab\x7b\xc8\xe5\xc3\x42\x2e\xff\xd8\x74\x12\x74\x98\x25\x63\xa8\xfc\x41\xd5\x08\x12\xb0\xae\xf9\xed\x56\xe2\x73\x0b\x30\xd4\x3a\x9e\x4e\xe3\xa7\x97\x16\x4e\x75\x7e\x50\x75\xac\x63\x41\xe9\xfa\xb6\xef\x17\xae\x6f\x4c\xe8\x4c\xca\x6f\x8c\x82\x65\xcc\x22\x75\x28\xbf\xe8\x79\xea\x8f\x53\x88\xa7\xad\xe1\xb6\xbc\x2f\x5a\x58\xbd\xd2\x81\x65\x42\x4c\x16\xca\xf6\xc1\x73\xf6\x15\x0e\x87\xc4\x39\x63\xfa\x20\x48\x13\x4a\x38\xf4\x31\xaa\x94\xa7\xd0\x74\xe8\x7c\xf7\x15\x55\x09\x9c\x0b\xe2\xe1\x92\x99\x9c\x1f\xfb\x0c\x90\xfd\x37\x39\x56\x2c\xcb\xf6\xfa\xdd\x91\x75\xc9\xd2\x9e\xcf\xe2\x0b\x2e\x2c\x1b\x7f\xa4\x99\xe7\xf1\xb4\xaa\xa2\x28\x31\xab\x48\x66\x9d\x38\xe9\x5d\x64\x52\x72\x91\xb6\xb0\xc8\x47\x26\x37\x89\xa7\xbe\x57\xa3\x2e\x5e\x12\x75\xac\x35\x51\x28\x2f\xc2\xb8\x37\x0b\x7d\x79\x69\x81\x1c\x59\x1b\x83\xc1\xe4\xc6\xb2\xad\x75\xfc\xdb\xc0\xa4\x37\x7e\x5e\x9d\x59\x1e\xcb\x5e\x2a\x05\x97\xde\x65\x53\x3b\xf5\x55\x44\x22\x3d\x73\x57\xb6\x88\x81\x0e\xfc\xe6\x1c\x9b\x78\x1c\x82\x44\x14\x78\x01\xb7\x11\xe5\x71\x8f\xd4\x3c\x60\xd5\x99\xfe\xea\x57\x7c\x85\x1b\xb7\x27\x4f\x26\xf6\xc9\xa7\x26\xe7\xd8\x03\xc7\xc9\xf2\x46\x1f\x99\xaa\xf7\x22\x76\x0e\x09\x83\xb0\xe2\x7b\x67\x4c\x33\xba\x23\xe9\xda\x33\x72\xea\xc3\x03\x64\xa3\xfb\xac\xa0\x34\xd2\x9d\x53\x90\x1e\x69\x49\xef\xfb\xc9\xa7\x23\xd5\xfb\x90\xda\x58\x33\xf6\xc8\x07\xd6\x80\x06\x2b\x8b\xd4\x73\x65\x5c\x2e\xd4\x32\x67\x36\x11\xe1\x98\x89\x5b\x4b\x9d\x74\x12\x50\x48\x1a\x58\x2e\xc5\xd5\xc9\x51\x7d\x27\xbc\x24\xea\xb1\x4c\x26\x9d\xda\xd7\x14\xf1\x58\x6f\xda\xbe\xc6\xad\x1b\xdf\xc5\x2b\x6e\x33\xf2\x86\x19\xec\xd5\x28\x26\xb8\x3c\x8a\xb4\xf0\xd3\x4b\x96\xc4\x05\xe3\xbf\x52\x99\x49\xc9\xcb\xc0\x8c\xbc\x29\xfb\x52\xf0\xa1\xd8\xec\x05\x5e\xbb\x5c\x8c\x2d\xa4\x2c\x1d\x03\x54\xd0\xe1\xfd\x8b\xbe\xd5\xcc\xec\x22\x01\x40\x76\x74\x6c\x60\xdb\x65\x29\x47\x0c\x8d\xb8\xf8\x03\x23\xbb\x3e\x2d\xfb\xde\xf5\xcb\xd1\x99\xfc\x12\x62\x7a\x4f\x85\xa7\x9c\x36\xe8\x2a\x73\x05\x0f\xa5\xa3\x22\x40\x9c\xb5\xa4\xab\x54\x43\xf6\x45\x32\xf1\x93\x99\x3e\xfc\xa8\x59\x04\x44\x4a\x62\x8a\xde\xfe\x95\x41\x0a\x43\x38\xf8\x74\x15\x8b\x51\xb2\x11\xc1\x93\x8e\x1f\xba\x9d\xb1\xbb\xde\x19\x8b\x46\x61\xdc\xe3\x9a\x88\xad\x64\x23\x8e\xd4\x97\x15\x8b\x20\x1b\xc0\x7c\x52\x01\x34\xb5\x47\x46\x35\x6a\x46\x9a\x4d\xef\x49\xe2\xd8\xd4\x79\x06\xde\xd4\xd9\x1c\x40\x32\x55\x44\x2b\x9c\x3a\x1b\xcf\x20\x98\xde\x33\xab\x77\x6e\xf2\x53\xa4\xf5\xbe\xd0\x89\x37\x0b\x2c\xa2\xe4\x7f\x5a\x4d\xee\xed\x4e\x97\x92\x7b\xe7\x54\x83\x7d\xc5\x1b\x75\xf7\x1a\x95\xa2\x6e\x84\x5a\x51\x4f\x6a\xab\xd2\x7d\xf4\x39\x18\x07\xa0\x69\x03\x62\xa5\x56\x32\x1a\x4c\x73\x15\xcd\x7b\x89\x11\x63\x33\xb4\xda\x15\x23\xa6\xb0\x8f\xde\x61\xa9\xef\x9d\xc7\x01\xec\x64\x7f\xa5\x47\x8e\xbd\x35\x91\x96\x3b\xf0\x73\x3d\x2b\x61\x0b\x6e\x96\x6d\xb8\xd9\x9b\x16\xab\x9a\x7a\x24\x2c\x30\x71\xb8\xf0\x3e\x29\xde\x27\xe5\x7b\x31\xc2\x89\x32\x83\xcd\xc5\x14\xb1\x79\x86\x1a\xc9\x7c\x35\xf2\x66\x6c\x5a\x41\xec\x32\xef\xc1\xf5\x0a\xc4\xfe\xdd\x27\x0c\x5d\x35\x4a\xe4\x6e\xfc\x70\xf5\xf2\x66\x53\x8d\xdc\x59\x05\xb9\xb3\xd5\xc8\x9d\x4d\xe9\x48\x7d\x61\x48\x6d\x66\x90\x7b\xe8\xa1\x72\x29\xc0\xef\x2a\x6e\x05\x99\x43\x2f\x4b\x2d\x70\xb9\xda\x53\x0a\x91\x47\x02\x6c\xf4\x2f\x24\x03\x02\xb1\x40\x4c\x9b\x49\xc1\x9c\x42\xda\xce\xf5\x18\xf8\xf5\x24\x64\xce\x03\x14\xac\x25\xc4\x7d\x6f\x97\x2a\x31\xfb\x55\x90\x1b\x74\x32\x49\x24\x55\xcc\x4b\xb6\x82\x75\xc1\x20\x54\x0d\x22\x2d\xba\xf9\xf0\x29\x58\xaf\x63\x54\xf1\x68\xae\x5
f\x4b\x66\x68\x2b\x23\x20\xf3\xaa\xfe\x62\x62\x54\x0a\xc9\x12\xfe\x2e\x27\xe4\xb7\x73\x42\xac\x81\x13\xaa\xf3\x3f\xee\x14\x45\x5f\xc0\x65\xa8\x28\x5c\x81\x2b\x19\xe2\xfe\x94\xe8\x9d\x12\x0f\x3b\xd7\xfc\xb6\x13\x24\xa2\x98\x74\xae\x5f\x30\x2a\xf6\x7f\x51\x77\xff\x14\x9d\x0b\xbd\x45\xbd\x72\xf9\xcd\xca\x5b\xc3\x5e\x10\xee\x90\xcc\x89\xa9\xc2\x78\xb1\x93\x29\xfc\x57\x5d\x65\xc5\x67\x78\xc9\x78\x12\x71\xc9\x7b\x63\x1e\x67\x1d\x6b\x8d\x90\xac\xcf\xd6\x7f\xfd\xca\xfa\xee\x16\x7d\xf8\x50\x1d\x3d\x2b\xbd\x4c\x66\x8a\xd6\x51\x0a\x9e\x47\x18\x9e\x1b\x0a\x89\x7e\x1c\xd2\x06\xa6\xa4\xa0\x80\xaa\xd7\x52\xfd\x21\xa7\x0a\xc7\x69\x1d\x60\x4e\x97\x91\x79\x98\x53\x88\xa6\xce\xcf\xae\xfd\x64\x0e\xfe\x1d\xc4\xb7\x91\xc8\x96\x92\xa1\xdd\x54\xbe\x4a\xda\x3b\x63\x64\xea\x11\xd1\xf7\x8e\x97\xf4\x3f\x71\x4e\xa6\xfb\xee\x38\xbf\x18\x9c\x9a\x74\x53\xdd\x06\x42\xa8\x91\x7e\xac\xd0\x7e\x99\xbe\xb8\x3d\xd5\xa4\xbc\x23\x61\x69\xdc\xf7\x8e\xe7\x85\x90\xf7\x51\xea\x68\x01\x62\x51\x2d\xbc\x2c\x49\x2b\x56\x51\x6d\xad\x06\x30\x01\x96\x64\xee\x5e\xec\xf3\x1b\x4c\xc5\x33\xa4\xf9\x5a\x54\x2e\x0d\x04\x8f\x98\x5e\xc0\x16\xe1\x77\x71\x8b\xd5\x92\x4d\x35\x95\xd1\x8a\x0d\xd1\xdb\x5c\xb1\xc4\x25\x88\x1e\x19\xcb\x3a\xbb\x63\xad\x69\x03\x2e\xb4\xad\x11\x15\x2c\x38\x9e\xb6\x9b\x02\xb9\x6f\x7f\xfd\x12\x7d\x77\x6b\xa4\x00\x5b\x3a\x82\x2a\xc4\x88\xd9\xb4\x76\x31\x40\xa6\xa6\xe1\x68\xc2\x78\xfd\x97\x08\xb9\x34\x84\xdc\x2b\xc8\xb6\xbc\x7f\x7b\xa9\xdb\xae\xda\x0a\x5c\xa1\xce\xf2\x79\xc3\x93\x85\xa7\xaa\x5b\x9e\xaa\xae\x3e\x55\xff\xc4\x2e\x95\x9c\x41\x3d\xdd\xaa\x62\x03\x38\xd2\xb7\x68\x8a\xc4\xf0\xd2\x5b\x62\x0a\xaa\xc6\x9f\x53\x3a\x1a\x7b\xc4\xa3\xb6\x61\x07\xd4\x2f\xa6\x7f\x25\x53\x3a\x9a\x7a\x5a\xd6\xe6\xd0\x16\x29\x51\xc3\x70\x46\x29\xb5\x8f\xdd\xb9\x81\x99\x2a\xb8\x18\x27\x79\x0b\x2c\x37\x4a\xbc\xeb\x52\x5d\x6b\xf0\xfd\x70\x30\xf8\x7f\x4a\xa5\x54\x0b\x8a\xe9\x2c\xfc\xea\x89\xf0\xe2\x52\x96\x68\xc7\x9f\x2a\xb1\x54\x6a\x7c\x63\xcf\x48\x77\x8a\xf6\xf7\xee\x0f\x5a\xde\x95\x03\x03\x6f\x4e\xe1\x72\x05\x0d\xde\xd5\x81\xbe\x9f\xeb\x8b\xf5\x73\x7d\xb3\x7e\xab\x7d\x68\x4f\x00\x3d\x83\x5e\xeb\x20\x73\xdb\x78\x3d\xe1\x25\xab\x95\xc9\x1e\x13\x7e\xa7\x4a\x7e\xeb\x85\xbd\x4b\xce\xfc\x2a\x33\xdf\xad\x02\x58\x47\x01\x99\x64\x6e\xda\xa9\xd4\xc5\x17\x79\x83\x1b\xf2\xd6\x87\x01\x78\x1a\x85\x9c\x30\x4c\xaa\x68\x4e\xe2\x8e\xb1\x42\x54\x64\x02\xeb\x0d\x1b\xeb\x89\x3e\xbb\x18\x59\x68\x2f\xdb\x21\x9a\x23\xa0\x96\xad\x5f\xe4\xe2\x5e\xe6\x2f\x64\x6d\x9c\x91\xf1\x54\xad\xee\x0d\x30\x8a\xb6\x95\x3e\xe4\x02\xa3\x6f\x81\xe8\x9f\xc0\x11\xcb\xdf\x97\xf7\x68\x20\xfa\xc7\xf0\x99\xe5\x94\x6b\x69\x29\x0c\x70\xe8\x89\x79\xa3\xf2\xab\x32\xff\x6a\x3a\x05\x0e\x6a\xbc\xb8\xc7\x79\xb1\xc8\x8b\xe3\x29\xfc\xf4\xbe\xd8\x0f\x06\x08\x8d\xd5\xcf\xec\xb4\xb1\x6f\xb9\xcc\x5c\x5e\x80\x2c\x99\x9a\x6c\x68\x02\xa2\xef\x31\x86\x4a\x00\x7a\x10\xcf\x0d\x45\x8c\x47\xab\xee\xf1\x37\xaa\x02\xf5\x56\x12\x45\x6c\x92\xf2\x0e\x8b\x22\xa3\x0b\xb7\xe8\x37\x7b\xc5\xbd\xfc\x42\x73\x6d\xa6\xb8\xd8\x38\x9f\xa0\xe7\xc3\x2d\xf9\xec\x43\x02\xa1\xe2\x93\xa4\x41\xb3\x17\x53\x54\xc0\x5d\x0e\x2d\x0a\x93\xe9\x82\x65\xd3\xc5\xb4\xb4\x6c\x8a\x13\x99\x2b\xe5\xf3\x1e\x75\x43\xc4\x27\xa9\xb6\x9b\x98\xa2\xf2\xee\x52\x8e\xa3\x9d\x44\x51\xd4\x9b\x7b\x4a\xb3\x77\x29\xc5\x05\xf3\xc3\xe4\x9f\xd2\x88\x0b\x44\x78\x31\x62\x3a\xf9\x9f\xd4\x82\xcf\x29\x5c\x4f\x8d\xe9\xeb\xcb\xa9\x09\xd1\x78\x8c\x0f\xc3\xc1\x1c\x5e\xe3\xd3\xb3\x39\x5c\xe1\xc3\xe6\x1c\xb6\xa6\xad\xf7\xa1\x15\xf9\x78\xf0\x87\x83\x11\xbb\x75\xb4\x6e\x25\xaa\xe4\x97\x29\x92\x64\x1a\x9b\x79\x4e\x0c\x89\x23\x20\x74\x24\x04\x0e\x07\xd7\xc1\xbb\x2a\x06\x19\x85\xd4\x31\x41\x
79\x05\x26\xed\x7d\x11\x3b\x1e\x08\x27\x01\xe9\x84\xc0\x9d\x00\x32\xc7\x05\xe6\xa4\x4a\x98\xde\x9b\xae\xf4\xa5\xea\x04\x64\x6b\x8a\x4e\x16\x6f\x24\xe6\xe0\x41\x97\xf0\x1d\xb8\x25\x1f\x52\x0c\xf1\xa7\x03\x12\x1e\x4e\xdb\x0c\xdb\x4c\xfe\xb1\x5a\x22\xc9\xe1\xe6\xe6\x80\xd2\xa5\x54\x53\x35\x8f\xfa\xcd\x7a\x12\xaf\x5a\x18\x85\x27\x35\xff\xc0\x41\x3d\x75\x57\xdd\x48\x6e\x1d\xb3\x67\xb4\xa1\xfe\x4b\x8c\x2f\xce\x9d\x29\xd9\x9b\xc2\x70\x08\x18\x9f\x56\x02\xc9\x9c\x1b\x45\xfe\x0e\xa7\x26\xba\x6e\x11\x79\xf7\x69\x4f\xfb\x7c\x8d\x32\x5b\x5f\x5e\x3e\xb5\xb3\xde\x90\xe6\x41\x78\x21\x73\xca\xa8\xbc\x0a\x36\x1d\x12\x57\xbb\xd2\x1a\x85\x22\x8c\xaf\xe9\xeb\xa9\xe3\x38\xf1\x68\x60\xc7\xd8\x27\x3e\x99\x3e\x33\x0a\x0b\x43\x8b\xab\x5a\x04\xb9\x38\xec\x0c\x24\x45\x1b\xca\x23\x92\xef\xd1\x49\x2b\xc3\x85\x11\x3a\xfe\x78\x3a\x12\xf6\x8c\x9c\x4c\x15\x33\xe9\x62\xca\x38\x98\x91\xf7\x09\x08\x7d\x2a\x53\xfd\xce\xa8\xd8\x3e\x23\x3c\x6f\xcc\xe1\x68\x7a\xa7\x45\xe7\xaf\x5f\xc6\xbf\xcc\x38\x7f\x2c\x44\x3a\x9d\x53\xd8\x59\xe8\x42\x62\x42\x1d\x63\x01\x70\x41\xae\x3d\xb5\x29\xd7\x9e\x3a\x04\x23\x0b\xaf\xb7\xc7\x49\x2c\x2f\x95\x9c\x02\x59\xfb\x35\x48\x11\x41\x37\x2e\x82\xba\x0a\xba\x32\xb0\xf0\x83\xb8\xdf\x7d\xf8\x70\x88\xd6\xde\x19\x9a\x69\x70\x3a\x12\xb6\x65\xcd\x35\xbf\x88\x03\x0e\xc1\xea\x70\x85\x25\x51\x97\x84\xaf\xbe\x83\xd5\x19\x27\x59\xca\x13\x74\x25\x41\x95\x10\x16\xdc\x82\xd5\x31\x62\x30\xc4\xcd\x01\xc9\xd8\x2d\x76\xda\x16\x8c\xcc\xb2\x8a\xfb\x57\x17\x6d\xa9\x0e\x89\x19\x1b\x8e\x4e\xd1\x64\x8c\x5f\x87\xed\x45\x9f\xfd\xfa\xb5\x8e\x81\x65\xdd\xb2\xbd\x79\xe8\xb8\x5c\xce\x38\x8f\xad\x39\xa1\x39\x0b\x7d\x48\x30\x4f\x81\xa2\xd1\x47\x53\x68\xcd\x55\x16\x02\x3a\x6e\x97\x83\x6d\xd4\x5c\xf9\x4c\xf2\x8e\xcb\xbc\x6b\x6b\x8d\xb0\x3e\x53\xff\xb8\x6b\x31\x6d\x14\x1a\x55\xd5\x40\x24\xb1\xb4\xd6\x92\x35\x12\xae\x11\x6f\xcd\x44\x4e\x4b\x7d\xb0\xca\xb5\x04\x17\x3d\xb4\xf3\x24\xc2\x8a\x00\x7f\x9e\x96\x38\xf8\x4a\x81\xd2\x01\xc6\x06\xe7\xb4\x22\x59\xbc\x5f\x49\x96\x16\x38\xb4\x9d\xfc\xba\x7e\x67\x9a\xe7\xd5\x32\x9d\xbc\x9b\xb6\x7a\x59\x9e\xa3\x44\x80\x01\xa6\xd4\xcf\x93\x29\x48\xd8\xa1\x6d\x37\x45\x57\x59\x2a\xc3\xe0\xb6\xb8\xa1\xa9\x2b\x6e\x2b\xd6\x57\x9c\x5f\x57\x38\x41\x1c\xd9\x4b\x0f\xce\x4c\xf4\x69\x6f\x3a\xfa\xd3\x3a\xce\x94\x80\x70\x80\x62\xc2\x49\xa6\x24\x82\x33\xc5\x54\x59\x27\x97\x99\x05\xd6\x8e\x08\x2d\xb0\x8e\x99\xb4\xbe\xd9\x7f\xde\xa3\x16\xba\x34\x68\xc3\xd3\xfa\x06\xe1\x78\x59\x18\x9b\x8b\xd2\x72\x63\x32\x89\xfb\x72\x35\xa5\x15\x6b\x87\xf7\xf5\xe5\x3b\x43\xfc\x30\x98\xc3\xf6\xd4\x04\xac\x7c\xab\xf9\x09\x57\xf5\x77\xaa\x9f\x27\x16\x85\x1f\xed\x9b\xf5\xb3\x6b\x3f\xa9\x66\x82\xc6\x38\x1c\x2b\xab\x3f\xae\x57\x3f\xd0\x1f\x4a\xa5\x48\xe2\x0b\x8b\xae\xc8\x1d\xbd\x14\xb3\xc3\xa4\x1f\x8b\xfb\xdd\xd1\x69\x62\xbb\x9c\x6a\xa7\x35\x98\x91\x90\x17\x09\xfb\xda\xad\x17\xf2\x14\x43\x95\xec\x42\xc4\xf2\x8a\xb8\x75\x5e\x12\x4f\xb9\x90\x9d\x54\x8a\x10\x47\xf6\x69\xaa\x2f\x69\x3f\x1a\x7e\x76\xc6\x81\x09\x4a\xe1\xeb\xb4\xcd\xdd\xb3\x0e\x68\x32\x1c\xf3\x49\xe8\x5d\x57\x81\xeb\x60\x9a\xd7\x48\x33\xf7\x8a\x7b\xb2\xc2\xc2\xc8\x91\xf5\x3a\xf6\x2d\xdb\x3a\xce\x55\x75\x8b\x50\x70\x99\x64\xa2\x49\xe9\x99\x4d\x7a\xda\xb4\xdc\x5c\x4b\x14\x10\xad\xce\xe7\x2d\xf9\xa1\x8e\xc3\x00\x86\xed\x57\x4e\x2c\xbe\x88\x78\xcf\x68\xb8\x77\xcc\x77\x77\xcc\x15\xbf\x02\x34\x37\xca\x84\xa5\x17\x7a\x46\x76\xb1\x3f\xb5\x40\x14\x99\xbc\x0a\x1e\x0d\x03\x22\x73\x6c\x2a\xfa\xec\xaa\x1a\xa7\xd3\x1a\xa0\xf1\x50\x9e\x76\x40\x3b\x20\x60\xb5\x1f\x77\x56\x23\x94\xc2\x6b\x8f\xac\x6b\xae\x73\x1a\xf2\x59\x7d\xaa\x39\xd7\x58\xb9\x0b\x52\x42\xe9\x3d\xd6\xa5\x77\xe7\xc2\xa8\x9e\xca\xa5\x59\
x12\x92\x92\x68\xf9\x0b\xa5\xe2\xc5\x5e\xde\x47\x25\x89\x48\xfe\x77\x77\x72\xf8\x2f\xde\xc9\xe1\xdf\xdf\xc9\xef\xf7\xdb\xc9\xef\x2b\x77\xf2\xef\xef\xdd\xf0\x9f\xde\x3b\x75\x42\x5b\xa0\x49\x6f\x1f\x89\xdb\xd1\x93\xf6\x82\x6c\x53\xbf\x28\x74\x63\x59\xf6\x8c\x4c\x12\x78\x06\x02\xd9\x9e\xb9\x41\x52\x78\xf9\xac\x38\xae\x2b\xf5\x4f\xa8\xa4\x66\xfc\xf9\x43\xfd\x73\x5b\x50\x4e\x64\x35\xe2\x39\x85\xef\xd3\xa6\xf0\xef\xc6\xad\xa7\xbc\xdb\x6f\x30\x20\x1c\x77\xdc\x8b\x5e\xc0\x7c\xee\x2f\x1b\x14\xae\xa3\x5d\x71\x8b\x01\xa6\xe4\x37\x72\x95\xf5\xe5\x62\x79\xa3\xe9\xa5\xae\xb2\x78\xe3\xfa\x22\x5e\x8c\x1b\x9d\xfc\xcd\x21\x9f\x55\x0d\x6d\xcb\x79\x37\x5e\x09\x88\x92\x11\x79\x35\x6d\x4d\xbe\xda\x66\x22\xac\x23\x06\x39\x3a\x62\x10\x46\xde\x74\xd1\x6a\xf3\x88\x10\x79\x87\xdd\x79\x6e\xc2\x87\x26\xe4\xb2\xef\x2e\xf3\x69\xab\x6d\xc8\xff\x9e\x85\x78\xc5\x1e\x5c\x18\x7b\x70\x61\xec\xc1\x5f\xfa\xc8\x14\x14\x76\xde\xe8\x10\x50\xb3\xf3\x56\x32\x80\xb6\x43\x5e\xf4\xaa\xb8\xbf\x1d\xb2\x68\xb7\x43\xfe\x3e\x45\xa7\x0c\x9e\xef\xc7\xc7\x69\xfb\xf1\xaa\x18\x4e\xb2\x81\xfe\x5c\xf9\xa5\x57\x53\x25\x84\x68\x33\xc4\x0f\xd3\xfb\xf8\x70\x2d\x25\x44\x59\x5a\x44\xb3\x70\xb2\xef\x8e\xfe\x9c\x91\xd3\x69\xce\x95\x1e\x54\xb4\x32\x2f\xf3\x80\xa2\xfa\x3e\xc9\xee\xa0\x33\xdd\xbe\xc0\x64\x20\x05\x82\xf9\x38\x45\x67\x0a\x54\x23\x35\xf7\xf3\x3e\x29\x63\x93\x56\x6e\xba\xcc\xb9\xaf\x25\xff\xc5\x75\xac\x49\xe2\x15\x7f\xb3\xd6\x59\x69\x03\x4f\xfc\xb7\x37\x63\x22\x46\xbe\xa6\xe4\x36\x70\x0f\xe6\x14\xf6\xa7\xed\x71\xf7\x1a\xbc\xe0\xaa\xc3\x19\x94\x81\x80\x32\x1f\xb6\x3c\x98\x91\x0f\x7a\xee\xf5\x6c\x37\xc6\x1b\x6e\x29\xfd\x95\xaf\x46\xf0\x66\xea\x58\x63\xd9\x7b\x6c\xc1\x97\x65\x4b\x82\xe5\x89\x1d\x91\x37\x53\x30\xbe\x25\x23\xab\xb3\x70\xdf\x6a\xd1\x9a\x0d\xe8\xed\xb4\xd9\x8a\xaa\xc2\x90\x15\xbb\x72\x60\x12\xf1\x75\x34\xcd\xc6\x0d\xc7\xcd\x29\x37\x2b\x37\xe8\x4e\x2b\x86\x38\x5a\x13\xfb\x3d\xab\xe8\x3a\x87\x88\xea\x61\x91\xa0\x6a\x51\xb0\x0c\x6a\xae\x17\xb2\xa0\xb1\x4d\x5b\x58\x35\x25\xe8\x05\x9c\xfb\x28\xd0\xd5\xb4\x5e\x73\x62\x8e\x53\xdc\x35\xac\xbd\xe8\x3a\x2b\xd5\xaa\xe6\x42\x1a\xf1\xe8\x99\x5b\xe1\x56\x3e\x08\x4d\x0f\x5f\x96\x30\x09\xb2\x7b\x9f\x43\xaa\xf0\x53\xc7\x5a\x7b\x93\x8b\x83\xa2\x5b\x73\xce\x40\x96\x00\xbd\xa3\xb9\x65\x5b\xa7\x98\x65\xb1\xd1\x88\x67\x1c\xf5\xd6\x3b\xcb\x17\xf9\x0a\xe5\x77\x97\x66\x1e\x2f\x32\x0e\x0b\x6d\x0d\x50\x60\x53\xb1\x70\x17\x86\xea\xd3\x82\x2a\x70\x5c\xb6\xe7\x73\xc8\xba\x46\x87\xc2\xba\x77\xf8\x48\xb0\x3e\xf3\xf3\xa8\x6c\x5e\x25\xa2\x76\x93\x1c\xae\x4d\x85\x34\x7f\x91\x44\xd9\x58\x27\x1b\x30\x56\xea\x15\xfb\x80\x8e\xb5\x26\xab\xc0\xbb\x8c\x36\x44\x85\x87\x51\xaf\xbe\xf8\x4a\xa8\xff\x8c\x4a\x56\x7d\xb7\x94\x2d\x5b\xa0\x74\x16\x60\xa8\xd8\xfa\x9c\x25\x5a\x3a\xc8\xf7\x1d\xf9\xbf\x67\xb4\x95\x81\xe5\xa8\x42\x47\x7f\xf3\x9a\x0f\xc8\xdd\xab\xab\xa1\xe0\x3f\xb2\xb8\x39\xc0\x2d\xdf\x99\xdd\x75\x8e\x93\x1c\x1a\xe7\x14\xbc\xee\x1d\xda\x92\x8a\x88\x59\x39\x70\x01\x61\x5d\xd8\xf1\xc1\x48\x8f\xa0\x04\x93\x5e\x74\xd1\xdb\x44\xcb\xc6\x27\x26\xe2\xca\xb9\x07\x33\x8f\xc2\xcc\x23\x59\x97\x42\x4c\xa1\x68\xb6\x6d\x52\x61\x94\x2d\x37\x16\x5a\xee\x2d\xb4\x94\x95\x96\xaf\xd1\xf2\xbb\xf1\x73\x87\x0b\x8d\x44\xc3\xa2\xe8\x8d\x32\x8d\x87\xa6\xf1\xd2\x4e\xa9\x63\xfb\xff\xfd\xbf\xd6\x92\x43\xce\xc2\x5e\xd4\xd1\x9c\xc1\x1d\x1d\x7d\x2f\xde\x5b\x30\x3c\x98\x79\x84\x77\xdb\x85\x07\x8f\x45\x5c\x71\x9d\x0b\xa2\xc3\x37\x7a\x97\x7f\x44\x22\xe0\xd1\xff\x90\x24\xfe\xa5\xc6\xf5\x12\x6b\x74\xe9\xa3\xb0\x2f\x79\x2a\x49\xec\xc4\x74\x64\xf9\x4c\xb2\x9e\xb5\x16\xdb\x31\x3c\xfa\x9f\x7f\xa4\xbf\x93\x2b\x36\x65\xfa\x42\xc3\xfe\xa5\x0a\x6d\xc5\xc4\xfe\xe3\xd1\xa5
\x1c\x47\x45\x53\xe1\x08\xb4\x2e\xd2\x79\xbc\x14\x72\x67\x02\x74\x57\x17\x82\x8d\xc7\xe7\xdc\x0f\x31\xe8\x44\x1e\x64\x09\xc2\x6e\x9b\x4f\x6e\xee\x8c\x2b\xfe\x11\xff\xfa\x87\xf8\xf5\x8f\x58\xfb\xe4\x06\x5d\xed\x79\xc9\x6f\x24\x13\x9c\x59\x14\xdc\xee\xca\x1c\x4d\x98\x01\x7f\x03\x86\x8f\x61\x5f\x90\xb0\xab\x33\x7e\xe2\x4d\x7e\xf6\xef\x41\x8e\x62\xf5\xf9\x8d\xcb\xf3\x1b\x74\xcd\x01\xce\xf2\x03\x2c\xf1\x00\xf3\x7b\x63\x47\x38\xf1\x08\x53\x4b\xfd\x2f\x40\x93\xff\x96\x71\x37\x0d\xf1\x5f\x8d\x30\xff\x03\x0b\x5e\x90\xe8\x85\xc9\xfc\x6d\xd4\x99\x76\x1b\x9d\x5f\x74\xac\x54\xde\x67\x9b\x80\xb9\xc5\xce\xc1\x53\x7f\xba\x90\x38\xbc\xef\xbe\x83\x50\xfd\xda\xc3\x8c\x63\x6c\x0b\xe3\x40\x75\x47\x87\xc4\x7a\xcf\x67\x79\xc2\x00\x85\xce\x8e\x13\x4c\x90\x46\xed\x43\x62\xbd\xf6\x43\x59\x96\xfd\xe0\xc4\x04\x97\xab\xc6\xf0\x6a\xb9\xd9\xcd\x05\xac\x5b\xe2\x75\x21\xeb\xbb\x87\x90\xf5\xd9\x19\x64\xfd\x54\xcd\xfb\xcb\x14\x3c\x60\x15\xe4\x5a\xc4\x93\x79\x33\xd5\x28\x75\x52\xa0\xd4\x97\x53\x4c\x65\x79\x4a\x61\x4a\xdc\x2e\x58\x5b\x79\x2e\xea\xbc\xea\x45\x51\xf5\x5a\x57\x3d\x5e\x58\xda\x82\x74\xdc\x92\xfd\x29\x70\x48\x74\xf0\x22\xd9\x45\x29\x51\x8d\xf1\xca\xd3\xde\x79\x73\x0a\x51\xd7\x91\x02\xfc\xae\xc3\x05\x4c\x1b\x91\xcb\x97\x98\x84\x02\xd5\x1c\x26\x5e\x43\xb7\x7b\x4f\x8b\xe5\x71\x6b\x45\xf7\x19\x8a\x91\x2f\x05\xef\xdc\x26\x59\x27\xcd\xcc\xc3\x8c\xc5\xb2\x23\x93\x8e\x4e\x25\xbe\xc0\x92\x8f\x2c\x0a\x6c\xc3\x5e\x6d\x15\x7a\xc6\xc8\x01\x27\x33\xd2\xd5\x73\xad\x1a\xf3\x6d\x25\x71\x10\x8a\xb1\xa6\x35\x6e\x62\x1f\x70\xf2\x32\xa1\xe0\x6d\xda\xd6\x6b\xfd\xb5\x7c\xdf\x31\xd9\xca\x0a\xe6\xb8\x49\x2f\x61\x2c\x0b\x53\x1e\x05\x46\xdb\x54\xda\xf8\x9b\x63\xb5\x86\x96\xdf\x7b\xbe\x09\x3b\x87\x36\x03\xad\x5f\x41\x5f\x82\x35\x62\x3d\x34\x41\xb3\xb4\xcb\xb3\x16\x8c\x27\x1a\xa5\x5f\x3e\x56\x52\xf8\x8a\x71\xfe\xef\xf4\x67\x5f\x0e\xd5\x64\xb2\x1d\x2c\xd3\x93\xcb\x2e\x32\x21\xa9\x05\xa2\xcf\xce\xea\xa2\xa7\x2e\x47\xae\x08\x6b\xb8\x87\x4b\x62\xa8\xe9\xc2\xe4\xa5\xd7\xbd\x60\xa4\x27\x54\x91\xb8\x4a\xae\xfd\x4b\x4e\xe0\x8b\xe1\xd6\x65\x05\xba\x3e\x19\x8d\x27\xa5\x70\xe4\x11\x41\x1b\xa3\x92\x2f\xcc\x73\xb5\x1e\x60\x95\x98\x67\x10\xf9\x62\xfc\x89\x03\x4e\x3e\x7b\x24\x5e\x00\x79\x3d\x71\x6b\x81\x88\xfc\x2b\xfb\xdf\xe6\x11\xd7\x0a\x6f\xb5\xaa\x62\x49\x0d\x5b\xea\x75\xde\x7b\x6a\x83\xae\x28\x85\x77\x1e\x89\x47\x78\x37\x3d\xee\x82\x80\x07\x43\x4a\xed\x6b\x59\xa8\x45\xe5\x9c\xc2\x6d\xb7\x21\xe2\x9d\x59\x43\x59\x53\xfd\xf0\x4a\x50\x3e\x0c\x18\x61\x82\x70\xb6\xd9\xce\xb1\x9b\xb9\x36\x26\x5d\x69\x25\xea\x9e\xcc\x8d\x75\x28\x5e\x78\x52\x8a\xae\xf3\xd3\xee\xb2\x3b\x72\x39\xc5\x86\x8e\x0e\xd5\x96\x2b\x20\x9b\x75\x73\xff\x7f\xf5\x82\xa2\xc5\xee\x1c\x3d\x96\x8d\xcf\x6e\x0e\x55\xb9\x69\xd0\xb2\x96\x45\xb6\x6a\x59\x6e\xba\xd5\x7c\x1b\xd7\x2b\xa4\x0c\x34\x4b\x93\x79\x24\xcf\x33\x0f\x0e\x78\x5d\x97\x21\x8a\xcb\x81\x4a\xa4\x43\xe9\x93\x1d\x46\xb6\x3d\xd4\x86\x2d\x98\xf0\xb8\xcc\xbf\xe0\x1d\xfc\xb7\x37\x09\xa3\x28\x99\x99\x1f\x66\xa4\x06\x0d\x20\x9e\x94\xc9\x64\xc1\xe7\x4a\x5f\x2c\xc7\x86\x05\x9f\xb7\x7c\xee\x1b\xde\x2d\xcc\x29\xbc\xec\x2e\xa9\x90\xc2\x80\xe8\x44\x0e\x15\x65\x79\x8b\x45\x5f\x81\xb8\xcc\x3d\xea\xb5\x02\xbe\x81\x5a\xbc\x65\xe5\xf5\xbd\xba\x68\x8c\x3a\x81\xbd\xc6\x7d\xef\x12\x14\x0c\x15\xf7\x02\xc7\xff\x9b\xe9\xe0\x27\xbc\xfa\xc7\x0b\xdd\xbe\xeb\xcd\x5b\x29\xe2\xeb\xae\x31\xac\xba\xd2\xe4\xc7\xb5\x28\x6c\xdd\x4b\x85\x54\xe1\x43\x45\x32\x6b\x72\xe6\x56\xd2\xe1\x3a\xca\x88\x58\x77\x21\xaa\xc2\x8c\x5c\x75\x9b\x78\xd3\x85\x9b\xba\xde\x70\x70\xb7\xef\x96\x28\x64\xbd\xbd\x66\x7e\xb2\x3d\xac\xc2\xa7\x22\x82\xbd\x09\xab\xf0\x41\xdf\xc3\xd3\x3b\x98\xc2\xd2\xb1\x0
f\x6a\xb1\x14\x0a\x1b\xd5\x23\x64\xaa\xff\xd3\xe4\xe2\x13\x27\xaf\xbb\xff\x2e\x5a\xb1\xd4\xf9\x22\xa1\x90\xe5\x26\x6e\x75\xc1\xda\xdb\xb6\x00\xf9\x67\xc4\x8c\xfa\x9d\xa6\xea\x1d\x26\xb1\x68\x0f\x4b\x0f\x69\x51\xac\x98\x82\x85\x42\x76\x56\x16\x6a\x15\xa4\xbf\xd8\xf8\x53\x59\xae\x75\x95\x7e\xc7\xbd\x35\xdf\x65\xa7\x65\x59\xce\x6a\xeb\x82\xe3\xea\x80\x24\xc7\xd7\x0a\x2f\x99\x9d\x29\x4a\x73\x05\xb3\x05\x8b\xc6\x2e\x7b\x09\xec\x78\xe6\x76\x84\x82\xd4\xe9\x61\x40\x28\x22\xc8\x35\x11\x3c\xee\x82\x84\x07\x83\x82\x08\xce\x29\x1c\x1a\xf6\x6e\xdd\xa2\x70\xae\x9f\xbd\xc4\x57\x20\x74\xa2\x7f\x4d\xd4\x3e\xc1\x67\x53\xcd\xd8\xb1\xc0\x91\xfe\x1d\xb3\xa9\x45\x61\xa7\xeb\xfc\xbc\xb1\xcb\x1c\x00\xe0\x8e\x6d\xa3\xf3\xb6\xe6\xf0\x5e\x97\xea\xb7\xd6\x1c\xde\xe5\xb5\x8b\x20\xbb\x58\x72\x9c\xff\x9a\xc3\x59\x51\xa3\x48\x20\x84\x35\xf2\x5f\x73\xd8\x2e\x6a\x98\xd4\x26\x58\xae\x9f\xe7\xf0\xb6\xdb\x1e\xe5\xab\x11\xf4\x97\x72\xf7\xef\x74\x6b\x20\xff\xbe\x9b\x13\x49\xc3\x0e\x18\x13\xc5\x45\xcb\xc4\xb3\x6e\x2d\xf5\xe5\x76\xb7\x96\x99\xe1\x7d\xb7\x0e\xef\xef\xba\xf3\xb9\xbe\x86\x1a\x59\x9d\xd2\x31\x75\x4e\xe1\xf4\x4e\x6e\x7b\x21\x00\xc3\xda\x8c\xbc\x35\x08\x78\xd9\x8f\xb8\x12\xf8\x60\x9b\x11\xd1\xbf\xc1\x00\x33\xc6\x31\xa6\xe6\x26\xa3\x01\xe3\x07\x12\xfa\x9d\x2e\x9c\x75\x61\xbb\x0b\xef\xba\xa0\x56\x3c\x0f\x6b\x37\x11\xc9\x98\xcb\x4b\x9e\xa5\xfd\x30\x79\xe4\x27\x5e\xaa\x37\x3f\x8c\x2f\xf4\xc3\x98\xc5\xec\x82\x8b\x47\x7a\x6b\x76\x79\x34\xb1\xe6\xdf\x28\xec\xae\xc6\xe1\x0b\x46\xf6\x46\x96\x60\xbe\x8f\xfe\x6b\xd6\x26\x7a\x8e\xe5\x6e\x2a\x26\xf6\x8e\x58\xbe\xa5\x85\x03\x8f\xc4\x7d\xef\x53\x1d\x6f\x1b\xc3\xa7\xce\xa4\x22\x56\xec\x7a\x55\x9b\xdf\x83\xae\x93\x08\x62\x09\x8c\x34\xf2\xa9\xdb\x9e\xa4\xfc\x95\x07\x16\x2e\xa7\xea\x03\xe3\x35\xc1\x41\x97\x58\xa9\xbc\x8d\x78\x7a\xc9\xb9\x2c\xac\xab\xa2\x84\xf9\x68\x59\x25\x08\xc3\xc0\xc8\x85\x41\x1c\x17\x22\x11\xa6\xe8\x0b\x27\xd6\x0e\x0b\x23\xee\x2b\x3a\xac\xda\x74\xb6\x8e\x8f\x3b\x81\x48\xc6\x3a\xd5\x0f\x35\xae\x8c\x3a\x56\xe7\x59\x4c\x7e\x7a\xaf\xec\x2d\xf0\x0e\xed\x43\x06\xde\x07\xbb\x91\xb1\xec\x8e\x14\x19\xd1\xc4\x96\xcd\x6d\xf5\x63\x13\x98\x7d\x4b\x3e\x44\x60\xfd\x97\x05\x24\xd6\x19\xc6\xd8\x63\xd0\xef\x46\x96\xe2\x27\x8e\xd0\xb7\xef\x63\xa4\xd8\x8a\x14\x8e\x14\x69\x98\x8d\xca\x80\x86\x76\x19\xe5\x30\xee\xbb\xae\xe2\x2f\xfb\x0c\x43\xe1\xcf\xc1\x5b\xb7\xa7\x92\xbc\x12\x14\xbc\xa7\xf6\x79\x04\xde\x73\xbb\x29\x80\xea\x33\xfb\x8c\xfc\xf9\x51\x2f\x3d\x72\x00\x2f\x2b\x20\x63\xcd\xe7\x73\xfa\x82\x27\xce\xcf\x03\x16\xc6\xf6\xcf\x30\x0e\xa5\xfd\x4a\x90\x77\x21\x25\x03\xf5\x91\xb8\xff\x3a\x1a\x8f\x8a\x94\xff\xc6\x7c\x2a\x48\x04\x41\x1e\xbd\x13\xc6\x1d\x49\xf1\x8f\x18\x61\xe8\x26\xcb\x71\xf8\xe8\x92\x3c\xa1\x76\x4c\xc4\x9f\xfc\x1b\xc8\x3f\xf9\x37\x6a\xab\x47\x47\x3d\xce\x09\x76\x09\x3c\xa1\x36\x3e\x39\x3c\x99\x13\xc5\x06\xd1\x17\xff\x7f\x00\x00\x00\xff\xff\xe8\x33\x75\xc0\x2b\xae\x01\x00"), }, "/templates": &vfsgen۰DirInfo{ name: "templates", @@ -163,16 +163,16 @@ var Assets = func() http.FileSystem { "/templates/default.tmpl": &vfsgen۰CompressedFileInfo{ name: "default.tmpl", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 4554, + uncompressedSize: 5233, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\xc1\x6e\xe3\x38\x0c\xbd\xe7\x2b\x08\xef\xa5\x39\xc4\xdd\x73\x81\x62\x51\x2c\x76\xe7\x52\x0c\x06\x29\x32\x97\xc1\xc0\x50\x6d\xc6\x55\x2b\x4b\xae\x44\xa7\x0d\x1c\xff\xfb\x40\xb6\x9b\x58\x91\x9d\xda\x41\xe7\x34\xbd\xd5\x2c\xf9\x48\x3d\x3e\x91\x4a\x59\x42\x82\x6b\x2e\x11\x82\x28\x62\x02\x35\x65\x4c\xb2\x14\x75\x00\x55\x75\xd3\xf9\x2e\x4b\x40\x99\x40\x55\xcd\x06\x43\x56\xcb\x5b\x1b\x55\x96\x10\xfe\xf7\x4a\xa8\x25\x13\xab\xe5\x2d\x54\xd5\xe5\x5f\x97\xb5\x9f\xf9\x47\x63\x8c\x7c\x83\xfa\xda\x3a\x2d\xdb\x0f\xd8\x41\xa1\xc5\x73\x81\x7a\xdb\x84\xb7\x89\xdc\x4c\xa6\xb8\x7f\xc4\x98\x6c\x86\x1f\x36\xfa\x8e\x18\x15\x06\x76\x40\x6a\x95\xe7\xa8\x9b\x50\xbe\x06\x7c\xde\xff\x33\x58\x73\xcd\x65\x6a\x63\xae\x6c\x4c\x7d\x20\x13\xfe\x5f\x5b\x61\x07\x02\x65\x37\xe3\x4f\xb0\x4e\x5f\xb4\x2a\xf2\x5b\x76\x8f\xc2\x84\x77\x4a\x13\x26\xdf\x18\xd7\x26\xfc\xce\x44\x81\x36\xe1\xa3\xe2\x12\x02\xb0\xa8\xd0\xa4\x4c\x09\x2e\x2c\x56\xf8\xaf\xca\x32\x25\x9b\xe0\x79\x6b\xeb\xe0\xcd\xa1\xaa\x2e\xca\x12\x5e\x38\x3d\xb8\xce\xe1\x12\x33\xb5\x41\x37\xfb\x57\x96\xa1\x69\x19\xed\xcb\xbe\x2f\x7c\xbe\xff\x6b\xa0\x4d\x09\x9a\x58\xf3\x9c\xb8\x92\xc1\x09\x8e\x09\x5f\xa9\x69\x69\x24\xb8\xa1\xd6\x55\x33\x99\x22\x84\x50\x55\x4d\x5d\x57\xb3\x83\xd1\xe7\xc9\xb2\xb2\xa8\x89\xb4\xe5\xdb\xaf\x6b\xd8\x1f\xa0\x2d\xac\x49\x7e\x23\xa5\x22\x66\x6b\x72\x20\x3b\xe6\xf3\x70\xef\x54\xa1\x63\xbc\x6a\x9a\x89\x12\x35\x23\xa5\x1b\x25\xce\x7a\x88\x72\x38\x30\x82\xc5\x4f\x61\x82\x6b\x56\x08\x0a\x89\x93\xc0\x96\x05\xc2\x2c\x17\x8c\x5c\x2d\x86\x43\x94\xbb\x38\x85\xb1\xb7\x21\xeb\x83\x72\xef\xdc\x48\xbc\x35\x13\xe2\x9e\xc5\x4f\x1e\x5e\x6f\xf9\x16\x14\x76\xf0\x9e\xa3\xe0\xf2\x69\x74\x05\x71\x5b\x01\x4f\x82\x71\x01\xb9\x46\xab\xae\x91\xde\x9d\x82\x4e\x32\x56\x8f\x9c\x91\x25\xf3\x58\x49\xcc\xd4\x23\x0f\xc6\xfb\x17\x5a\x8c\xad\x78\xfc\xe1\xd6\x4a\x51\x33\x60\x07\x44\x98\xdb\xa3\x25\x05\x6d\xf7\x21\xfe\xfd\x9d\x26\x47\x1f\x31\x16\x1c\x25\x9d\x2f\xc8\x21\xc4\xc3\x12\x38\xaf\x67\x3e\x2e\x97\x86\x98\x8c\xd1\xf4\xe0\x7a\x03\x2b\x1c\x66\x55\xe5\x26\x45\xc9\x71\x0f\x9c\xa1\x31\x2c\x3d\xef\x7e\x7b\x60\x7e\x87\xda\xf9\x3e\x30\xce\x7a\x07\xfa\xec\x68\x9d\x38\xfb\x6a\x0e\x7f\xc3\xa2\xaa\x66\x8d\x11\x1a\x63\x3d\x38\x4f\x33\xe2\x2e\xbd\x3a\xc9\xa2\x73\xa2\x9e\x7c\x4b\x34\x4a\x6c\x30\x39\xca\xf8\x66\x1e\x9f\xf3\x2d\xc2\xcb\xba\x18\x43\xa9\xa9\xe7\xf8\x74\x35\x39\x5d\x7f\xc1\xf8\x81\xd1\xd4\x9e\xcf\x3e\xfb\x77\xa2\x7f\xdd\x77\xe1\x4a\x0b\x0f\xaf\xb7\x3f\x03\x5d\x3f\xea\x0f\xa9\xc8\x2e\xcb\xc1\x49\xea\xbb\xe7\x4c\xd3\x76\x82\x3f\xb1\x74\xac\x37\x4b\x51\x52\x74\xbc\xe2\x5c\x7d\x6d\x78\x4c\x4a\xab\xdc\x1c\x64\x4b\x8c\x30\x72\x85\xf6\xa9\xa5\x69\xb3\xc0\x67\x15\x25\x71\xda\x46\x09\x37\xb9\x60\xdb\x68\xe0\x35\xf5\xfe\xe0\xf6\x91\x33\x25\x39\x29\x4b\x48\x44\x4a\x89\x89\x2b\xd1\xd9\x5d\x85\x79\x50\x1b\xd4\x1f\xf0\x7e\xf4\xa0\x7e\xbf\x9e\x3e\x46\x4e\xe3\xd5\xf4\x71\x62\xea\xe4\x1c\xc1\xe4\xe1\x4d\x37\x65\xa7\x74\x5f\x73\xb2\x73\xd9\x0f\xbf\x4a\xa7\xff\x46\xe8\xe0\x7c\xb6\x77\x4a\x7b\xbb\x2c\x12\x0a\x4c\x35\xcb\xfa\xa8\xfc\x93\x48\xf9\x15\x00\x00\xff\xff\xf5\xfe\x1f\x22\xca\x11\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x57\x41\x6f\xdb\x3c\x0c\xbd\xe7\x57\x10\xfe\x2e\xcd\x21\xee\x77\x2e\x50\x0c\xc5\xb0\xed\x52\x0c\x43\x8a\xec\x32\x0c\x86\x62\x33\xae\x5a\x59\x72\x25\x3a\x6d\xe0\xf8\xbf\x0f\xb2\xdd\xc4\x8e\xec\xd4\x0e\xb2\xd3\x72\xab\x59\xf2\x91\x7a\x7c\x22\x95\x3c\x87\x08\x57\x5c\x22\x78\x41\xc0\x04\x6a\x4a\x98\x64\x31\x6a\x0f\x8a\xe2\xae\xf1\x9d\xe7\x80\x32\x82\xa2\x98\xf4\x86\x2c\xe6\xf7\x36\x2a\xcf\xc1\xff\xf2\x46\xa8\x25\x13\x8b\xf9\x3d\x14\xc5\xf5\x7f\xd7\xa5\x9f\xf9\xa4\x31\x44\xbe\x46\x7d\x6b\x9d\xe6\xf5\x07\x6c\x21\xd3\xe2\x25\x43\xbd\xa9\xc2\xeb\x44\xed\x4c\x26\x5b\x3e\x61\x48\x36\xc3\x2f\x1b\xfd\x40\x8c\x32\x03\x5b\x20\xb5\x48\x53\xd4\x55\x28\x5f\x01\xbe\xec\xfe\xe9\xad\xb8\xe6\x32\xb6\x31\x37\x36\xa6\x3c\x90\xf1\xbf\x96\x56\xd8\x82\x40\xd9\xcc\xf8\x1b\xac\xd3\x37\xad\xb2\xf4\x9e\x2d\x51\x18\xff\x41\x69\xc2\xe8\x07\xe3\xda\xf8\x3f\x99\xc8\xd0\x26\x7c\x52\x5c\x82\x07\x16\x15\xaa\x94\x31\xc1\x95\xc5\xf2\x3f\xab\x24\x51\xb2\x0a\x9e\xd6\xb6\x06\xde\x14\x8a\xe2\x2a\xcf\xe1\x95\xd3\x63\xdb\xd9\x9f\x63\xa2\xd6\xd8\xce\xfe\x9d\x25\x68\x6a\x46\xbb\xb2\xef\x0a\x9f\xee\xfe\xea\x69\x53\x84\x26\xd4\x3c\x25\xae\xa4\x77\x84\x63\xc2\x37\xaa\x5a\x1a\x08\x6e\xa8\x76\xd5\x4c\xc6\x08\x3e\x14\x45\x55\xd7\xcd\x64\x6f\x74\x79\xb2\xac\xcc\x4a\x22\x6d\xf9\xf6\xeb\x16\x76\x07\xa8\x0b\xab\x92\xdf\x49\xa9\x88\xd9\x9a\x5a\x90\x0d\xf3\x69\xb8\x0f\x2a\xd3\x21\xde\x54\xcd\x44\x89\x9a\x91\xd2\x95\x12\x27\x1d\x44\xb5\x38\x30\x82\x85\xcf\x7e\x84\x2b\x96\x09\xf2\x89\x93\xc0\x9a\x05\xc2\x24\x15\x8c\xda\x5a\xf4\xfb\x28\x6f\xe3\x64\xc6\xde\x86\xa4\x0b\xaa\x7d\xe7\x06\xe2\xad\x98\x10\x4b\x16\x3e\x3b\x78\x9d\xe5\x5b\x50\xd8\xc2\x47\x8e\x82\xcb\xe7\xc1\x15\x84\x75\x05\x3c\xf2\x86\x05\xa4\x1a\xad\xba\x06\x7a\x37\x0a\x3a\xca\x58\x39\x72\x06\x96\xcc\x43\x25\x31\x51\x4f\xdc\x1b\xee\x9f\x69\x31\xb4\xe2\xe1\x87\x5b\x29\x45\xd5\x80\xed\x11\x61\x6a\x8f\x16\x65\xb4\xd9\x85\xb8\xf7\x77\x9c\x1c\x5d\xc4\x50\x70\x94\x74\xba\x20\xfb\x10\xf7\x4b\xe0\xb4\x9e\xb9\xb8\x5c\x1a\x62\x32\x44\xd3\x81\xeb\x0c\x2c\xbf\x9f\x55\x95\x9a\x18\x25\xc7\x1d\x70\x82\xc6\xb0\xf8\xb4\xfb\xed\x80\xb9\x1d\xaa\xe7\x7b\xcf\x38\xeb\x1c\xe8\x93\x83\x75\xd2\xda\x57\x53\xf8\x1f\x66\x45\x31\xa9\x8c\x50\x19\xcb\xc1\x79\x9c\x91\xf6\xd2\x2b\x93\xcc\x1a\x27\xea\xc8\x37\x47\xa3\xc4\x1a\xa3\x83\x8c\xef\xe6\xe1\x39\xdf\x23\x9c\xac\xb3\x21\x94\x9a\x72\x8e\x8f\x57\x53\xab\xeb\xaf\x18\x3e\x32\x1a\xdb\xf3\xc9\xa5\x7f\x47\xfa\xd7\x7c\x17\x2e\xb4\x70\xf0\x3a\xfb\xd3\xd3\xf5\x83\xfe\x90\x0a\xec\xb2\xec\x9d\xa4\xae\x7b\xca\x34\x6d\x46\xf8\x13\x8b\x87\x7a\xb3\x18\x25\x05\x87\x2b\xae\xad\xaf\x35\x0f\x49\x69\x95\x9a\xbd\x6c\x89\x11\x06\x6d\xa1\x5d\xb4\x34\x6e\x16\xb8\xac\xa2\x24\x4e\x9b\x20\xe2\x26\x15\x6c\x13\xf4\xbc\xa6\x3e\x1e\xdc\x2e\x72\xa2\x24\x27\x65\x09\x09\x48\x29\x31\x72\x25\xb6\x76\x57\x66\x1e\xd5\x1a\xf5\x19\xde\x8f\x0e\xd4\xdf\xd7\xd3\x79\xe4\x34\x5c\x4d\xe7\x13\x53\x23\xe7\x00\x26\xf7\x6f\xba\x31\x3b\xa5\xf9\x9a\x93\x8d\xcb\xbe\xff\x55\x3a\xfe\x37\x42\x03\xe7\xd2\xde\x31\xed\x6d\xb2\x48\x28\x30\xd6\x2c\xe9\xa2\xf2\x9f\x25\x25\xe2\x26\x54\x3a\x3a\xc3\x20\x3a\x44\xba\xb0\x6b\x9f\x09\x4b\x7c\xbb\x5c\xdd\x93\x78\xfc\x13\x00\x00\xff\xff\x40\x8b\x18\x6d\x71\x14\x00\x00"), }, "/templates/email.tmpl": &vfsgen۰CompressedFileInfo{ name: "email.tmpl", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 13888, + uncompressedSize: 14084, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x1a\x7f\x6f\xda\x48\xf6\x7f\x7f\x8a\xb7\x5e\x9d\xb6\x95\x30\x90\xa4\x8d\x1a\x7e\xe9\x28\x98\xc4\x3a\x62\x22\x70\xda\xad\x4e\xa7\xd5\x60\x3f\xf0\xec\xda\x33\xbe\x99\x21\xc0\xe6\xf2\xdd\x4f\x33\x06\x62\x08\x69\xbb\xab\x53\x61\xaf\x69\xd4\xc4\x33\x7e\xef\xcd\xfb\xfd\xde\x78\xc6\xba\xbf\x87\x08\x27\x94\x21\xd8\x98\x12\x9a\x94\x23\x9c\x90\x59\xa2\xca\x72\x36\xfe\x15\x43\x65\xc3\xc3\xc3\xfd\x3d\x28\x4c\xb3\x84\x28\x04\xfb\x97\x5f\x36\x6f\xca\xf9\x3b\x64\x11\x3c\x3c\x3c\x4f\x29\x56\x69\xa2\xc9\x58\x8d\x1f\x1c\xc7\x1a\xa9\x65\x82\x40\x58\x04\x57\xc1\x75\x1f\x22\x14\xf4\x0e\x23\x98\x08\x9e\x42\xac\x54\x26\x6b\x95\xca\x94\xaa\x78\x36\x2e\x87\x3c\xad\x68\x4a\xd3\x19\xab\x28\x41\x98\x24\xa1\xa2\x9c\x91\xc4\x31\x0b\x38\x6b\xa6\xa4\x65\x59\x41\x8c\x70\xed\x05\xd0\xa7\x21\x32\x89\xf0\xea\xda\x0b\x5e\x5b\x56\x87\x67\x4b\x41\xa7\xb1\x82\x57\xe1\x6b\x38\xad\x9e\xbc\x81\xeb\x9c\xa2\x65\xdd\xa0\x48\xa9\x94\x94\x33\xa0\x12\x62\x14\x38\x5e\xc2\x54\x10\xa6\x30\x2a\xc1\x44\x20\x02\x9f\x40\x18\x13\x31\xc5\x12\x28\x0e\x84\x2d\x21\x43\x21\x39\x03\x3e\x56\x84\x32\xca\xa6\x40\x20\xe4\xd9\xd2\xe2\x13\x50\x31\x95\x20\xf9\x44\xcd\x89\xc8\x25\x24\x52\xf2\x90\x12\x85\x11\x44\x3c\x9c\xa5\xc8\x14\xd1\x12\xc0\x84\x26\x28\xe1\x95\x8a\x11\xec\xd1\x0a\xc3\x7e\x6d\x16\x89\x90\x24\x16\x65\xa0\xdf\xad\x5f\xc1\x9c\xaa\x98\xcf\x14\x08\x94\x4a\x50\xa3\x85\x12\x50\x16\x26\xb3\x48\xf3\xb0\x7e\x9d\xd0\x94\xae\x56\xd0\xe8\x46\x70\x69\x29\x0e\x33\x89\x25\xc3\x67\x09\x52\x1e\xd1\x89\xfe\x8b\x46\xac\x6c\x36\x4e\xa8\x8c\x4b\x10\x51\x4d\x7a\x3c\x53\x58\x02\xa9\x27\x8d\x1e\x4b\x5a\x8e\x0a\x17\x20\x31\x49\xac\x90\x67\x14\x25\x18\x59\x1f\xb9\x33\x30\x9a\xf5\x4c\x2b\x54\xad\x54\x24\xf5\xcc\x3c\xe6\xe9\xb6\x24\x54\x5a\x93\x99\x60\x54\xc6\x68\x70\x22\x0e\x92\x9b\x15\xb5\x4f\xe9\x19\x0d\x3e\xe1\x49\xc2\xe7\x5a\xb4\x90\xb3\x88\x6a\x89\x64\x2d\x37\x32\x19\xf3\x3b\x34\xb2\xe4\x76\x65\x5c\xd1\x30\x57\xb7\x31\x40\xf6\x68\xd5\xd5\x2b\x19\x93\x24\x81\x31\xae\x14\x86\x11\x50\x06\xa4\x20\x8e\xd0\xcb\x4b\x45\x98\xa2\x24\x81\x8c\x0b\xb3\xde\xae\x98\x65\xcb\x0a\xae\x5c\x18\x0d\x7a\xc1\xc7\xf6\xd0\x05\x6f\x04\x37\xc3\xc1\x07\xaf\xeb\x76\xc1\x6e\x8f\xc0\x1b\xd9\x25\xf8\xe8\x05\x57\x83\xdb\x00\x3e\xb6\x87\xc3\xb6\x1f\x7c\x82\x41\x0f\xda\xfe\x27\xf8\x87\xe7\x77\x4b\xe0\xfe\x7c\x33\x74\x47\x23\x18\x0c\x2d\xef\xfa\xa6\xef\xb9\xdd\x12\x78\x7e\xa7\x7f\xdb\xf5\xfc\x4b\x78\x7f\x1b\x80\x3f\x08\xa0\xef\x5d\x7b\x81\xdb\x85\x60\x00\x7a\xc1\x15\x29\xcf\x1d\x69\x62\xd7\xee\xb0\x73\xd5\xf6\x83\xf6\x7b\xaf\xef\x05\x9f\x4a\x56\xcf\x0b\x7c\x4d\xb3\x37\x18\x42\x1b\x6e\xda\xc3\xc0\xeb\xdc\xf6\xdb\x43\xb8\xb9\x1d\xde\x0c\x46\x2e\xb4\xfd\x2e\xf8\x03\xdf\xf3\x7b\x43\xcf\xbf\x74\xaf\x5d\x3f\x28\x83\xe7\x83\x3f\x00\xf7\x83\xeb\x07\x30\xba\x6a\xf7\xfb\x7a\x29\xab\x7d\x1b\x5c\x0d\x86\x9a\x3f\xe8\x0c\x6e\x3e\x0d\xbd\xcb\xab\x00\xae\x06\xfd\xae\x3b\x1c\xc1\x7b\x17\xfa\x5e\xfb\x7d\xdf\xcd\x97\xf2\x3f\x41\xa7\xdf\xf6\xae\x4b\xd0\x6d\x5f\xb7\x2f\x5d\x83\x35\x08\xae\xdc\xa1\xa5\xc1\x72\xee\xe0\xe3\x95\xab\xa7\xf4\x7a\x6d\x1f\xda\x9d\xc0\x1b\xf8\x5a\x8c\xce\xc0\x0f\x86\xed\x4e\x50\x82\x60\x30\x0c\x36\xa8\x1f\xbd\x91\x5b\x82\xf6\xd0\x1b\x69\x85\xf4\x86\x83\xeb\x92\xa5\xd5\x39\xe8\x69\x10\xcf\xd7\x78\xbe\x9b\x53\xd1\xaa\x86\x2d\x8b\x0c\x86\x66\x7c\x3b\x72\x37\x04\xa1\xeb\xb6\xfb\x9e\x7f\x39\xd2\xc8\x5a\xc4\x35\x70\xd9\x72\x9c\x96\xd5\xf8\xa1\x3b\xe8\x04\x9f\x6e\x5c\xd0\x49\x0a\x6e\x6e\xdf\xf7\xbd\x0e\xd8\x4e\xa5\xf2\xf1\xac\x53\xa9\x74\x83\x2e\xfc\x6c\xd2\xd4\x49\xb9\x0a\x81\x4e\x41\x34\xcf\x40\x95\x8a\xeb\xdb\x60\xeb\x84\x55\xab\x54\xe6\xf3\x79\x7
9\x7e\x56\xe6\x62\x5a\x09\x86\x95\x85\xa6\x75\xa2\x91\x57\x8f\x8e\x2a\x60\x96\x23\x15\xd9\x2d\xab\x61\x16\x5c\xa4\x09\x93\xcd\x3d\x64\x4e\x2e\x2e\x2e\x72\x6c\x1b\xa4\xce\x98\x4d\x3b\x25\x62\x4a\x59\x0d\xaa\x75\x98\x70\xa6\x9c\x09\x49\x69\xb2\xac\xc1\x4f\x57\x98\xdc\xa1\xa2\x21\x01\x1f\x67\xf8\x53\x09\x36\x13\x25\x68\x0b\x4a\x92\x12\x48\xc2\xa4\x23\x51\xd0\x49\x1d\xc6\x7c\xe1\x48\xfa\x3b\x65\xd3\x1a\x8c\xb9\x88\x50\x38\x63\xbe\x58\x11\x95\xf4\x77\xac\xc1\xc9\x9b\x6c\x51\x37\x4c\x22\x89\x5a\x56\x23\x45\x45\x80\x91\x14\x9b\xf6\x1d\xc5\xb9\x8e\x10\x5b\x47\xa6\x42\xa6\x9a\xf6\x9c\x46\x2a\x6e\x46\x78\x47\x43\x74\xcc\xc0\x5e\xe3\x68\xc1\x1c\xfc\xf7\x8c\xde\x35\xed\x4e\x0e\xef\x04\xcb\x0c\x0b\xd8\x0a\x17\xaa\xa2\x05\xad\x9b\x54\x2b\x51\x35\x6f\x83\x9e\xf3\x4e\xd3\x50\x54\x25\xd8\xfa\x5c\xf9\x69\x54\x72\x18\xab\x61\xb4\xd4\xb2\xfe\x9e\x62\x44\x09\x70\x96\x2c\x41\x86\x02\x91\x99\xd4\xf0\x2a\x25\x8b\x9c\xb7\x1a\x9c\xbf\xa9\x66\x8b\xd7\x70\x6f\x01\x8c\x79\xb4\x34\x0f\x00\x19\x89\x22\xa3\x93\x2a\xfc\x40\x53\x2d\x22\x61\xaa\x6e\x01\x3c\x58\x16\x40\x7c\x52\xb2\xe2\xd3\x92\x15\x9f\x95\xac\xf8\xcd\x0a\xc5\x68\x6c\x8e\x3a\x0d\xd5\xe0\x5d\x75\x17\x11\x60\x6d\xb2\xd3\x6a\xb6\x80\x2a\xbc\xcd\x16\xfb\x69\x17\xe9\xe5\x16\x38\x3d\x7d\x06\xf6\xf4\x29\xec\xc9\xbb\x67\x60\xcf\xf6\xc0\x9e\xef\x87\x2d\x6b\x7b\x10\xca\x50\x7c\x49\x1d\x00\x2b\x35\x9e\x54\xab\x7f\x7b\x96\x14\x32\xf5\x55\x7a\x5d\x03\x3b\x73\x41\xb2\x5d\x8c\x93\xea\x33\xcc\x52\x76\xc7\x75\x66\xbf\xff\x22\x3f\x0f\x56\xa3\xb2\xf2\x8c\x46\x25\xf7\x66\xab\x61\xac\x4e\x15\xa6\x32\xe4\x19\x9a\x27\xb5\xcc\x70\x13\x87\x32\x8c\x31\x25\x26\x0e\x5d\xdd\x68\x5c\xa3\x94\x64\x8a\x07\x8b\x44\x70\xe6\x38\xfe\x8d\x2a\x27\x7f\x91\x72\xae\x62\x83\x94\x57\x2c\x4a\x24\x46\x8f\x40\x3a\xa0\x0c\xb6\x43\xa2\x5f\x67\x52\xd5\x80\x71\x86\x75\x88\x57\x8e\xaa\xf5\x54\x87\x84\x32\x74\x36\x53\xe5\x73\x4c\xeb\x30\x26\xe1\x6f\x53\xc1\x67\x2c\x72\x42\x9e\x70\x51\x83\x1f\x27\xe7\xfa\xa7\x5e\xd4\xb1\x4e\x0c\x56\x43\x91\x71\x82\x10\x26\x44\xca\xa6\xad\x15\x6a\x2c\x78\x38\x15\x7d\x2d\xef\xf9\xa8\x69\xeb\x91\x0d\xe3\xa9\x01\x6e\xda\x2b\x60\xbb\x65\x01\x34\x94\x38\x60\xce\xd5\x1e\xdd\x50\xd1\xc1\x14\x79\x87\x42\x13\x49\x1c\x92\xd0\x29\xab\x81\xe2\x59\xdd\x86\x3b\x33\x6a\xda\x8a\x67\x76\xab\x51\x51\xd1\x23\xa3\x2b\x1f\xd8\x24\x90\x8d\x8a\xcf\xab\xd5\x8d\x3f\x1c\x8e\x77\xdd\xd8\x66\x09\x59\xd6\x60\x9c\xf0\xf0\xb7\x3a\x14\x2b\x41\xb5\xaa\xd1\x36\x3a\x06\x32\x53\xbc\x0e\x61\x82\x44\xe8\xa5\x54\xbc\x2b\xba\x91\x1a\xa0\x11\xd1\xbb\xa2\xe0\xc8\xd4\x37\x16\xf5\xcb\x52\xec\xca\xbd\x49\xab\xba\x1a\xd5\x37\xa2\x68\x23\x16\x63\x39\x25\x94\xed\x44\x49\x88\x49\xb2\xc2\x6e\xda\xd5\x7c\x2c\x33\x12\xae\xc7\xc7\x14\xf3\x13\x43\x40\x23\xd5\xe0\x24\x5b\x80\xe4\x09\x8d\xe0\x47\xbc\xd0\x3f\xeb\x57\x8e\x20\x11\x9d\xc9\x1a\x9c\x69\x4d\x14\xb3\xc0\x64\x52\x50\xcc\x31\xa4\x82\xf5\xbf\xfb\x7b\xa0\x13\x98\x2a\x78\x95\x20\x83\x72\x3b\x41\xa1\x64\xb9\x47\x05\x65\xd3\xd7\x50\xd5\xdb\xeb\x22\x78\x21\x30\x89\x06\x05\xf3\xdb\x99\x13\xa1\x37\xad\xdf\xcc\x64\x7b\x03\x72\xa7\x1d\xa9\xc3\x96\xf1\xb6\x9a\xaa\xb7\xd5\xea\xae\xe3\x82\xa9\x6f\x2b\x7a\x21\x32\x85\x62\x9f\x59\xcd\xff\xaa\x16\x6c\x8f\x97\xb8\xe7\x6f\x4f\x4f\x3b\x3b\xb1\x0d\xab\xe7\x9c\x66\xd1\x2b\x72\xf0\xa7\x06\xc1\x44\xe2\x13\xc5\xeb\xdc\xf8\x8c\xf6\xa7\x9c\x47\xdf\xbb\xea\xcf\xdf\xbd\xbf\xa8\xf6\xbe\x5a\xf5\x39\xf8\x1e\xd5\xe7\xdf\x9c\x00\x76\xe6\x57\x81\x01\xff\x01\x1d\x26\x0f\x0f\xb9\xe6\xf7\x06\xcf\x6b\x38\x81\x87\x07\xb9\x21\x06\x13\x2e\x34\x09\x41\xd8\x14\xa1\x7c\x29\xf8\x2c\xeb\x93\x31\x26\xb2\x3c\xe2\x42\x61\x74\x43\xa8\x90\x4f\x57\xcd\xd7\xf5\x49\xaa\x7d\xa1\xa9\x
9f\x3f\x90\x64\xa6\x07\xf0\x94\xbf\x3d\x7c\x3f\x56\xd3\xcd\x58\x1c\x67\x0e\xda\xa9\xf6\xeb\xb6\xdd\x3e\xaa\x5e\xe5\xd1\x67\xcf\x4c\x9d\xdb\x5b\xbf\x77\x4a\xdf\x5f\xa1\xda\x3d\xe1\xfd\x98\x3c\xe3\x59\xff\x30\xbd\xc7\xb1\x3a\x48\x15\xaa\xab\x66\xe8\xb3\x4e\xb2\x12\x8e\x40\x2c\x70\xd2\xb4\x77\x3e\x47\x98\x04\x93\x12\x46\xa6\x28\x6e\x87\xfd\xfc\xb3\x84\xbd\xd9\x19\x29\xe6\x64\x82\xa6\x44\x2c\x0f\xa7\x05\x93\xb6\x23\x0c\xb9\x30\xdf\x86\xd7\x1b\xc2\x75\x4a\xee\xf5\x7a\x7b\x53\xf5\xd9\x9b\x77\x18\x91\xc7\x76\x6a\xd5\x4a\x6d\x4f\x3b\x9b\xdd\x55\xb6\x58\x55\x89\xad\xad\xe5\xa9\xde\x58\x6e\x15\x96\x31\x4f\xa2\xfd\xa5\x24\x9c\x09\xa9\x57\xce\x38\xcd\x27\x36\x2d\x2c\x65\x86\xe8\xaa\x93\xdd\x29\x39\x6f\x37\x32\x9a\xef\x6c\x13\x2e\xd2\x1a\x84\x24\xa3\x8a\x24\xf4\x77\xac\xdb\xad\x0f\x14\xe7\x40\x19\x7c\xc6\x74\xeb\xcf\x49\x64\xaf\x63\xef\xa4\xe8\xfd\x89\xfa\x4f\x35\x6a\x2f\x71\xfc\xad\xe3\x58\x2a\xc1\xd9\xf4\x70\x0a\xff\xe7\x63\x9b\xb2\x72\x8b\x4d\xb7\xf2\x2f\xc8\x27\x1a\x95\x9c\xc9\xff\x81\x2f\xee\x6d\x94\xa0\xd0\xe7\x6c\x73\xf2\xe2\x9d\xdf\xb9\x77\xe6\x1d\xef\xc6\x01\x1b\xe3\xe3\x32\x7e\xd1\x73\xf7\x36\xe7\x85\x5e\x1c\x9a\x50\xec\xc6\x0f\x2a\xca\xf3\x91\xb8\xaf\x66\x30\xc6\xf3\x63\x5c\x99\x57\x8c\x83\x7b\x45\x81\xa3\xe3\x70\x8d\x2f\xea\x73\x9d\xdd\x1e\x19\xff\x7f\x70\x94\x62\x13\x5a\xbe\x44\x86\x82\x28\xae\xdb\x4e\xd3\x73\x1e\x2a\xff\xed\x36\x8c\x4f\xfa\xcd\x19\x8b\x50\xe8\x0e\xae\x6e\xb7\x46\x7c\x26\x42\xd4\x8d\xd6\xb1\xe5\x96\x3f\x57\x59\xbf\xb2\x01\x1c\xa2\xe4\xc9\x1d\x46\xcf\xb4\x80\x2f\x7d\xe3\xd1\x57\xe6\xa3\xab\x84\x8d\xf8\xe8\x38\xfa\x0b\x47\xf4\xe7\xba\xe5\x97\x40\xfb\x5e\x37\x68\xeb\xb4\x5d\xd8\xa2\xad\xa7\x0e\xb0\x49\xdb\x70\xf3\xe2\xa3\x2f\xdb\xb4\x97\x6d\xda\xcb\x36\xed\x65\x9b\xf6\xb2\x4d\x7b\xd9\xa6\xfd\xf1\xda\xda\xa8\x98\x83\xbf\xd6\x1f\x38\x88\xdd\xa0\x3c\xce\x14\x2e\x01\x4d\x38\x37\x27\xd7\x87\xb2\x6d\xf1\x8e\xdb\xd6\xfd\xa5\x8d\xd5\x2f\x2e\x2e\x3e\x73\x07\x68\xff\x51\xe8\xb1\x1c\x3b\x1f\x8f\x03\x6e\xdd\xec\xa0\x53\x96\x1f\x5d\xc1\x61\x3a\x9c\xaf\x6c\x68\xf6\x9f\xb5\x15\xdd\xa2\x28\xf6\xe9\x93\x16\x68\xe7\x76\x46\x6b\x2b\xbb\xb9\x0b\x85\x82\x91\xe4\x1b\x27\xb7\xcf\xe4\xae\x2f\x88\xd6\x1a\x21\x53\x30\x5e\x7e\xdd\x61\xe0\xd3\x64\xf3\xe4\x7a\xc6\x6e\x2a\x69\x54\x22\x7a\xd7\xca\x7f\x5b\xdb\x79\xe5\x2f\x72\xaf\x33\x17\xf1\x31\xe1\x35\x2a\x63\x1e\x2d\xcd\x0d\x6a\x95\x26\x2d\xcb\x7a\x4c\xab\xff\x0d\x00\x00\xff\xff\xdc\x5c\x67\xbb\x40\x36\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x5a\xfd\x6e\xdb\x38\x12\xff\x5f\x4f\x31\xab\xc5\x61\x5b\xc0\xb2\x9d\xa4\x0d\x1a\x7f\xe1\x5c\x47\x4e\x84\x73\xe4\xc0\x56\xda\x2d\x0e\x87\x05\x2d\x8d\x2d\xee\x4a\xa4\x8e\xa4\x63\x7b\x73\x79\xf7\x03\x29\xd9\xb1\x1d\x27\xcd\xed\x1d\x6a\xef\x35\x0d\x9a\x48\xd4\xcc\x70\xbe\x38\xf3\xa3\x44\xeb\xee\x0e\x22\x1c\x53\x86\x60\x63\x4a\x68\x52\x8e\x70\x4c\xa6\x89\x2a\xcb\xe9\xe8\x57\x0c\x95\x0d\xf7\xf7\x77\x77\xa0\x30\xcd\x12\xa2\x10\xec\x5f\x7e\x59\x3d\x29\xe7\xcf\x90\x45\x70\x7f\xff\xb4\xa4\x58\xa5\x89\x16\x63\x35\x7e\x70\x1c\x6b\xa8\x16\x09\x02\x61\x11\x5c\x06\x57\x3d\x88\x50\xd0\x5b\x8c\x60\x2c\x78\x0a\xb1\x52\x99\xac\x55\x2a\x13\xaa\xe2\xe9\xa8\x1c\xf2\xb4\xa2\x25\x4d\xa6\xac\xa2\x04\x61\x92\x84\x8a\x72\x46\x12\xc7\x4c\xe0\x2c\x95\x92\x96\x65\x05\x31\xc2\x95\x17\x40\x8f\x86\xc8\x24\xc2\x9b\x2b\x2f\x78\x6b\x59\x1d\x9e\x2d\x04\x9d\xc4\x0a\xde\x84\x6f\xe1\xb8\x7a\xf4\x0e\xae\x72\x89\x96\x75\x8d\x22\xa5\x52\x52\xce\x80\x4a\x88\x51\xe0\x68\x01\x13\x41\x98\xc2\xa8\x04\x63\x81\x08\x7c\x0c\x61\x4c\xc4\x04\x4b\xa0\x38\x10\xb6\x80\x0c\x85\xe4\x0c\xf8\x48\x11\xca\x28\x9b\x00\x81\x90\x67\x0b\x8b\x8f\x41\xc5\x54\x82\xe4\x63\x35\x23\x22\xb7\x90\x48\xc9\x43\x4a\x14\x46\x10\xf1\x70\x9a\x22\x53\x44\x5b\x00\x63\x9a\xa0\x84\x37\x2a\x46\xb0\x87\x05\x87\xfd\xd6\x4c\x12\x21\x49\x2c\xca\x40\x3f\x5b\x3e\x82\x19\x55\x31\x9f\x2a\x10\x28\x95\xa0\xc6\x0b\x25\xa0\x2c\x4c\xa6\x91\xd6\x61\xf9\x38\xa1\x29\x2d\x66\xd0\xec\xc6\x70\x69\x29\x0e\x53\x89\x25\xa3\x67\x09\x52\x1e\xd1\xb1\xfe\x8b\xc6\xac\x6c\x3a\x4a\xa8\x8c\x4b\x10\x51\x2d\x7a\x34\x55\x58\x02\xa9\x07\x8d\x1f\x4b\xda\x8e\x0a\x17\x20\x31\x49\xac\x90\x67\x14\x25\x18\x5b\x1f\xb4\x33\x34\x5a\xf5\x4c\x3b\x54\x15\x2e\x92\x7a\x64\x16\xf3\x74\xd3\x12\x2a\xad\xf1\x54\x30\x2a\x63\x34\x3c\x11\x07\xc9\xcd\x8c\x3a\xa7\xf4\x88\x26\x1f\xf3\x24\xe1\x33\x6d\x5a\xc8\x59\x44\xb5\x45\xb2\x96\x07\x99\x8c\xf8\x2d\x1a\x5b\xf2\xb8\x32\xae\x68\x98\xbb\xdb\x04\x20\x7b\x88\x6a\xf1\x48\xc6\x24\x49\x60\x84\x85\xc3\x30\x02\xca\x80\xac\x99\x23\xf4\xf4\x52\x11\xa6\x28\x49\x20\xe3\xc2\xcc\xb7\x6d\x66\xd9\xb2\x82\x4b\x17\x86\xfd\x6e\xf0\xb9\x3d\x70\xc1\x1b\xc2\xf5\xa0\xff\xc9\x3b\x77\xcf\xc1\x6e\x0f\xc1\x1b\xda\x25\xf8\xec\x05\x97\xfd\x9b\x00\x3e\xb7\x07\x83\xb6\x1f\x7c\x81\x7e\x17\xda\xfe\x17\xf8\x9b\xe7\x9f\x97\xc0\xfd\xf9\x7a\xe0\x0e\x87\xd0\x1f\x58\xde\xd5\x75\xcf\x73\xcf\x4b\xe0\xf9\x9d\xde\xcd\xb9\xe7\x5f\xc0\xc7\x9b\x00\xfc\x7e\x00\x3d\xef\xca\x0b\xdc\x73\x08\xfa\xa0\x27\x2c\x44\x79\xee\x50\x0b\xbb\x72\x07\x9d\xcb\xb6\x1f\xb4\x3f\x7a\x3d\x2f\xf8\x52\xb2\xba\x5e\xe0\x6b\x99\xdd\xfe\x00\xda\x70\xdd\x1e\x04\x5e\xe7\xa6\xd7\x1e\xc0\xf5\xcd\xe0\xba\x3f\x74\xa1\xed\x9f\x83\xdf\xf7\x3d\xbf\x3b\xf0\xfc\x0b\xf7\xca\xf5\x83\x32\x78\x3e\xf8\x7d\x70\x3f\xb9\x7e\x00\xc3\xcb\x76\xaf\xa7\xa7\xb2\xda\x37\xc1\x65\x7f\xa0\xf5\x83\x4e\xff\xfa\xcb\xc0\xbb\xb8\x0c\xe0\xb2\xdf\x3b\x77\x07\x43\xf8\xe8\x42\xcf\x6b\x7f\xec\xb9\xf9\x54\xfe\x17\xe8\xf4\xda\xde\x55\x09\xce\xdb\x57\xed\x0b\xd7\x70\xf5\x83\x4b\x77\x60\x69\xb2\x5c\x3b\xf8\x7c\xe9\xea\x21\x3d\x5f\xdb\x87\x76\x27\xf0\xfa\xbe\x36\xa3\xd3\xf7\x83\x41\xbb\x13\x94\x20\xe8\x0f\x82\x15\xeb\x67\x6f\xe8\x96\xa0\x3d\xf0\x86\xda\x21\xdd\x41\xff\xaa\x64\x69\x77\xf6\xbb\x9a\xc4\xf3\x35\x9f\xef\xe6\x52\xb4\xab\x61\x23\x22\xfd\x81\xb9\xbf\x19\xba\x2b\x81\x70\xee\xb6\x7b\x9e\x7f\x31\xd4\xcc\xda\xc4\x25\x71\xd9\x72\x9c\x96\xd5\xf8\xe1\xbc\xdf\x09\xbe\x5c\xbb\xa0\x8b\x14\x5c\xdf\x7c\xec\x79\x1d\xb0\x9d\x4a\xe5\xf3\x49\xa7\x52\x39\x0f\xce\xe1\x67\x53\xa6\x8e\xca\x55\x08\x74\x09\xa2\x79\x05\xaa\x54\x5c\xdf\x06\x5b\x17\xac\x5a\xa5\x3
2\x9b\xcd\xca\xb3\x93\x32\x17\x93\x4a\x30\xa8\xcc\xb5\xac\x23\xcd\x5c\x5c\x3a\x6a\x8d\xb3\x1c\xa9\xc8\x6e\x59\x0d\x33\xe1\x3c\x4d\x98\x6c\xee\x10\x73\x74\x76\x76\x96\x73\xdb\x20\x75\xc5\x6c\xda\x29\x11\x13\xca\x6a\x50\xad\xc3\x98\x33\xe5\x8c\x49\x4a\x93\x45\x0d\x7e\xba\xc4\xe4\x16\x15\x0d\x09\xf8\x38\xc5\x9f\x4a\xb0\x1a\x28\x41\x5b\x50\x92\x94\x40\x12\x26\x1d\x89\x82\x8e\xeb\x30\xe2\x73\x47\xd2\xdf\x29\x9b\xd4\x60\xc4\x45\x84\xc2\x19\xf1\x79\x21\x54\xd2\xdf\xb1\x06\x47\xef\xb2\x79\xdd\x28\x89\x24\x6a\x59\x8d\x14\x15\x01\x46\x52\x6c\xda\xb7\x14\x67\x7a\x85\xd8\x7a\x65\x2a\x64\xaa\x69\xcf\x68\xa4\xe2\x66\x84\xb7\x34\x44\xc7\xdc\xd8\x4b\x1e\x6d\x98\x83\xff\x9c\xd2\xdb\xa6\xdd\xc9\xe9\x9d\x60\x91\xe1\x1a\xb7\xc2\xb9\xaa\x68\x43\xeb\xa6\xd4\x4a\x54\xcd\x9b\xa0\xeb\x7c\xd0\x32\x14\x55\x09\xb6\x9e\x6b\x3f\x8d\x4a\x4e\x63\x35\x8c\x97\x5a\xd6\x5f\x53\x8c\x28\x01\xce\x92\x05\xc8\x50\x20\x32\x53\x1a\xde\xa4\x64\x9e\xeb\x56\x83\xd3\x77\xd5\x6c\xfe\x16\xee\x2c\x80\x11\x8f\x16\xe6\x02\x20\x23\x51\x64\x7c\x52\x85\x1f\x68\xaa\x4d\x24\x4c\xd5\x2d\x80\x7b\xcb\x02\x88\x8f\x4a\x56\x7c\x5c\xb2\xe2\x93\x92\x15\xbf\x2b\x58\x8c\xc7\x66\xa8\xcb\x50\x0d\x3e\x54\xb7\x19\x01\x96\x21\x3b\xae\x66\x73\xa8\xc2\xfb\x6c\xbe\x5b\xf6\xba\xbc\x3c\x02\xc7\xc7\x4f\xd0\x1e\x3f\xa6\x3d\xfa\xf0\x04\xed\xc9\x0e\xda\xd3\xdd\xb4\x65\x1d\x0f\x42\x19\x8a\xaf\xb9\x03\xa0\x70\xe3\x51\xb5\xfa\x97\x27\x45\x21\x53\x2f\xf2\xeb\x92\xd8\x99\x09\x92\x6d\x73\x1c\x55\x9f\x50\x96\xb2\x5b\xae\x2b\xfb\xdd\x57\xf5\xb9\xb7\x1a\x95\x22\x33\x1a\x95\x3c\x9b\xad\x86\x89\x3a\x55\x98\xca\x90\x67\x68\xae\xd4\x22\xc3\xd5\x3a\x94\x61\x8c\x29\x31\xeb\xd0\xd5\x40\xe3\x0a\xa5\x24\x13\xdc\xdb\x4a\x04\x67\x86\xa3\xdf\xa8\x72\xf2\x07\x29\xe7\x2a\x36\x4c\x79\xc7\xa2\x44\x62\xf4\x40\xa4\x17\x94\xe1\x76\x48\xf4\xeb\x54\xaa\x1a\x30\xce\xb0\x0e\x71\x91\xa8\xda\x4f\x75\x48\x28\x43\x67\x35\x54\x3e\xc5\xb4\x0e\x23\x12\xfe\x36\x11\x7c\xca\x22\x27\xe4\x09\x17\x35\xf8\x71\x7c\xaa\x7f\xea\xeb\x3e\xd6\x85\xc1\x6a\x28\x32\x4a\x10\xc2\x84\x48\xd9\xb4\xb5\x43\x4d\x04\xf7\xe7\xa2\x97\xea\x9e\xdf\x35\x6d\x7d\x67\xc3\x68\x62\x88\x9b\x76\x41\x6c\xb7\x2c\x80\x86\x12\x7b\xac\xb9\x3a\xa3\x1b\x2a\xda\x9b\x23\x6f\x51\x68\x21\x89\x43\x12\x3a\x61\x35\x50\x3c\xab\xdb\x70\x6b\xee\x9a\xb6\xe2\x99\xdd\x6a\x54\x54\xf4\xa0\x68\x91\x03\xab\x02\xb2\x72\xf1\x69\xb5\xba\xca\x87\xfd\xe9\xae\x81\x6d\x96\x90\x45\x0d\x46\x09\x0f\x7f\xab\xc3\x7a\x27\xa8\x56\x35\xdb\xca\xc7\x40\xa6\x8a\xd7\x21\x4c\x90\x08\x3d\x95\x8a\xb7\x4d\x37\x56\x03\x34\x22\x7a\xbb\x6e\x38\x32\xf5\x8d\x4d\xfd\xba\x15\xdb\x76\xaf\xca\xaa\xee\x46\xf5\x95\x29\x3a\x88\xeb\x6b\x39\x25\x94\x6d\xad\x92\x10\x93\xa4\xe0\x6e\xda\xd5\xfc\x5e\x66\x24\x5c\xde\x1f\xd2\x9a\x1f\x1b\x01\x9a\xa9\x06\x47\xd9\x1c\x24\x4f\x68\x04\x3f\xe2\x99\xfe\x59\x3e\x72\x04\x89\xe8\x54\xd6\xe0\x44\x7b\x62\xbd\x0a\x8c\xc7\x6b\x8e\x39\x84\x52\xb0\xfc\x77\x77\x07\x74\x0c\x13\x05\x6f\x12\x64\x50\x6e\x27\x28\x94\x2c\x77\xa9\xa0\x6c\xf2\x16\xaa\x7a\x7b\xbd\x4e\xbe\xb6\x30\x89\x26\x05\xf3\xdb\x99\x11\xa1\x37\xad\xdf\x2c\x64\x3b\x17\xe4\x16\x1c\xa9\xc3\x46\xf0\x36\x40\xd5\xfb\x6a\x75\x3b\x71\xc1\xf4\xb7\x42\x5e\x88\x4c\xa1\xd8\x15\x56\xf3\xbf\xaa\x0d\xdb\x91\x25\xee\xe9\xfb\xe3\xe3\xce\xd6\xda\x86\xe2\x3a\x97\xb9\x9e\x15\x39\xf9\x56\x40\x4c\x48\x8a\x38\xc0\xbf\x40\x47\xe5\xfe\x3e\x77\xf3\xce\x58\xbd\x85\x23\xb8\xbf\x97\xab\xf7\x25\x30\xe6\x42\x8b\x10\x84\x4d\x10\xca\x17\x82\x4f\xb3\x1e\x19\x61\x22\xcb\x43\x2e\x14\x46\xd7\x84\x0a\xb9\x1d\xd7\xe5\xbc\x3e\x49\x11\xee\xef\x9b\xfa\xfa\x13\x49\xa6\xf8\x98\xf0\xe1\xcd\xcc\x46\x62\xac\x6a\xf7\x3a\x5d\x22\xf1\x85\x19\x34\xe1\x3c\xfa\xde\x
d3\xe7\xf4\xc3\xc7\xb3\x6a\xf7\xc5\xe9\x93\x93\x1f\x7c\xfa\xc0\x7f\x91\x3f\xdb\x74\x8d\x8a\x12\x87\x59\x47\xb7\x10\xcb\x72\xeb\x61\x1f\x14\xde\x7a\xc8\xd9\x13\xd3\xab\x77\x62\x90\xad\xf6\xfd\x67\xe8\xd8\x8f\x74\x3f\xa4\xcc\x78\x32\x3f\x0c\x7e\x3a\xd4\x04\xa9\x42\xb5\x00\x74\xcf\x26\x49\x61\x1c\x81\x58\xe0\xb8\x69\x6f\xbd\x52\x31\x65\x27\x25\x8c\x4c\x50\xdc\x0c\x7a\xf9\xab\x15\x7b\xb5\xbb\x53\xcc\xc9\x04\x4d\x89\x58\xec\xcf\x0b\xa6\x6c\x47\x18\x72\x61\xde\x6f\x2f\x37\xb5\xcb\x92\xdc\xed\x76\x77\x96\xea\x93\x77\x1f\x30\x22\x0f\x90\xb0\x80\x83\x9b\xc3\xce\x6a\x87\x98\xcd\x8b\x2e\xb1\xb1\x3d\x3e\xd6\x9b\xe3\x8d\xc6\x32\xe2\x49\xb4\xbb\x95\x84\x53\x21\xf5\xcc\x19\xa7\xf9\xc0\x0a\x86\x53\x66\x84\x16\x68\x7c\xab\xe5\xbc\x5f\xd9\x68\xde\x15\x8e\xb9\x48\x6b\x10\x92\x8c\x2a\x92\xd0\xdf\xb1\x6e\xb7\x3e\x51\x9c\x01\x65\xf0\x4c\xe8\x96\xaf\xc4\xc8\xce\xc4\x7e\x54\xb8\x77\x15\xea\x3f\x04\x36\x5f\xd7\xf1\xb7\x5e\xc7\x52\x09\xce\x26\xfb\x73\xf8\xdf\x1f\xc0\x4b\x91\x16\x2b\x0c\xf3\x0f\xc8\x07\x1a\x95\x5c\xc9\xff\x41\x2e\xee\x80\x21\xc5\x93\x02\xfd\x6c\x6a\xf2\x9a\x9d\xdf\x79\x76\xe6\x38\x78\x95\x80\x8d\xd1\x61\x05\x7f\x3d\x73\x77\x42\xf6\x35\x84\x0e\x4d\x58\xc7\xe8\x7b\x35\xe5\xe9\x95\xb8\xab\x67\x30\xc6\xf3\x4f\xd1\x32\xef\x18\x7b\xcf\x8a\x35\x8d\x0e\x23\x35\xbe\xea\xcf\x65\x75\x7b\x50\xfc\xff\x21\x51\xd6\x41\x68\xf9\x02\x19\x0a\xa2\xb8\x86\x9d\x06\x73\xee\xab\xfe\x6d\x03\xc6\x47\x78\x73\xca\x22\x14\x1a\xc1\xd5\xed\xd6\x90\x4f\x45\x88\x1a\x68\x1d\x5a\x6d\xf9\x63\x9d\xf5\x85\x00\x70\x80\x92\x27\xb7\x18\x3d\x01\x01\x5f\x71\xe3\xc1\x77\xe6\x83\xeb\x84\x8d\xf8\xe0\x34\xfa\x13\xaf\xe8\xe7\xd0\xf2\xeb\x42\xfb\x5e\x37\x68\xcb\xb2\xbd\xb6\x45\x5b\x0e\xed\x61\x93\xb6\xd2\xe6\x35\x47\x5f\xb7\x69\xaf\xdb\xb4\xd7\x6d\xda\xeb\x36\xed\x75\x9b\xf6\xba\x4d\xfb\xcf\x7b\x6b\xa3\x62\x3e\xfc\xb5\x9e\xfd\x3c\xbb\x29\x72\xc5\xf2\x30\xb2\x76\x90\x69\xcc\xb9\xf9\x72\xbd\xaf\xd8\xae\x9f\xd3\xdb\x38\x83\xb5\x8a\xfa\xd9\xd9\xd9\x33\xe7\x98\x76\x7f\x0a\x3d\x94\xcf\xce\x87\x93\x80\x1b\x27\x3b\xe8\x84\xe5\x9f\xae\x60\x3f\x08\xe7\x85\x80\x66\xf7\xb7\xb6\xf5\xb4\x58\x37\xfb\xf8\x11\x04\xda\x3a\x9d\xd1\xda\xa8\x6e\xee\x5c\xa1\x60\x24\xf9\xc6\xc5\xed\x99\xda\xf5\x15\xd3\x5a\x43\x64\x0a\x46\x8b\x97\x7d\x0c\x7c\x5c\x6c\x1e\x1d\xcf\xd8\x2e\x25\x8d\x4a\x44\x6f\x5b\xf9\x6f\x6b\xb3\xae\xfc\x49\xce\xa6\xe6\x26\x3e\x14\xbc\x46\x65\xc4\xa3\x85\x39\x05\xae\xd2\xa4\x65\x59\x0f\x65\xf5\xdf\x01\x00\x00\xff\xff\x4e\x2a\x1d\xf2\x04\x37\x00\x00"), }, } fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ diff --git a/vendor/github.com/prometheus/alertmanager/config/config.go b/vendor/github.com/prometheus/alertmanager/config/config.go index 448f18532..21698681c 100644 --- a/vendor/github.com/prometheus/alertmanager/config/config.go +++ b/vendor/github.com/prometheus/alertmanager/config/config.go @@ -16,9 +16,9 @@ package config import ( "encoding/json" "fmt" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "regexp" "sort" @@ -192,7 +192,7 @@ func Load(s string) (*Config, error) { // LoadFile parses the given YAML file into a Config. 
func LoadFile(filename string) (*Config, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -248,6 +248,12 @@ func resolveFilepaths(baseDir string, cfg *Config) { for _, cfg := range receiver.TelegramConfigs { cfg.HTTPConfig.SetDirectory(baseDir) } + for _, cfg := range receiver.DiscordConfigs { + cfg.HTTPConfig.SetDirectory(baseDir) + } + for _, cfg := range receiver.WebexConfigs { + cfg.HTTPConfig.SetDirectory(baseDir) + } } } @@ -335,6 +341,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("at most one of opsgenie_api_key & opsgenie_api_key_file must be configured") } + if c.Global.VictorOpsAPIKey != "" && len(c.Global.VictorOpsAPIKeyFile) > 0 { + return fmt.Errorf("at most one of victorops_api_key & victorops_api_key_file must be configured") + } + + if len(c.Global.SMTPAuthPassword) > 0 && len(c.Global.SMTPAuthPasswordFile) > 0 { + return fmt.Errorf("at most one of smtp_auth_password & smtp_auth_password_file must be configured") + } + names := map[string]struct{}{} for _, rcv := range c.Receivers { @@ -365,8 +379,9 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if ec.AuthUsername == "" { ec.AuthUsername = c.Global.SMTPAuthUsername } - if ec.AuthPassword == "" { + if ec.AuthPassword == "" && ec.AuthPasswordFile == "" { ec.AuthPassword = c.Global.SMTPAuthPassword + ec.AuthPasswordFile = c.Global.SMTPAuthPasswordFile } if ec.AuthSecret == "" { ec.AuthSecret = c.Global.SMTPAuthSecret @@ -471,11 +486,12 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if !strings.HasSuffix(voc.APIURL.Path, "/") { voc.APIURL.Path += "/" } - if voc.APIKey == "" { - if c.Global.VictorOpsAPIKey == "" { + if voc.APIKey == "" && len(voc.APIKeyFile) == 0 { + if c.Global.VictorOpsAPIKey == "" && len(c.Global.VictorOpsAPIKeyFile) == 0 { return fmt.Errorf("no global VictorOps API Key set") } voc.APIKey = c.Global.VictorOpsAPIKey + voc.APIKeyFile = c.Global.VictorOpsAPIKeyFile } } for _, sns := range rcv.SNSConfigs { @@ -492,6 +508,26 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { telegram.APIUrl = c.Global.TelegramAPIUrl } } + for _, discord := range rcv.DiscordConfigs { + if discord.HTTPConfig == nil { + discord.HTTPConfig = c.Global.HTTPConfig + } + if discord.WebhookURL == nil { + return fmt.Errorf("no discord webhook URL provided") + } + } + for _, webex := range rcv.WebexConfigs { + if webex.HTTPConfig == nil { + webex.HTTPConfig = c.Global.HTTPConfig + } + if webex.APIURL == nil { + if c.Global.WebexAPIURL == nil { + return fmt.Errorf("no global Webex URL set") + } + + webex.APIURL = c.Global.WebexAPIURL + } + } names[rcv.Name] = struct{}{} } @@ -580,7 +616,7 @@ func checkTimeInterval(r *Route, timeIntervals map[string]struct{}) error { // DefaultGlobalConfig returns GlobalConfig with default values. 
func DefaultGlobalConfig() GlobalConfig { - var defaultHTTPConfig = commoncfg.DefaultHTTPClientConfig + defaultHTTPConfig := commoncfg.DefaultHTTPClientConfig return GlobalConfig{ ResolveTimeout: model.Duration(5 * time.Minute), HTTPConfig: &defaultHTTPConfig, @@ -592,6 +628,7 @@ func DefaultGlobalConfig() GlobalConfig { WeChatAPIURL: mustParseURL("https://qyapi.weixin.qq.com/cgi-bin/"), VictorOpsAPIURL: mustParseURL("https://alert.victorops.com/integrations/generic/20131114/alert/"), TelegramAPIUrl: mustParseURL("https://api.telegram.org"), + WebexAPIURL: mustParseURL("https://webexapis.com/v1/messages"), } } @@ -693,26 +730,29 @@ type GlobalConfig struct { HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` - SMTPFrom string `yaml:"smtp_from,omitempty" json:"smtp_from,omitempty"` - SMTPHello string `yaml:"smtp_hello,omitempty" json:"smtp_hello,omitempty"` - SMTPSmarthost HostPort `yaml:"smtp_smarthost,omitempty" json:"smtp_smarthost,omitempty"` - SMTPAuthUsername string `yaml:"smtp_auth_username,omitempty" json:"smtp_auth_username,omitempty"` - SMTPAuthPassword Secret `yaml:"smtp_auth_password,omitempty" json:"smtp_auth_password,omitempty"` - SMTPAuthSecret Secret `yaml:"smtp_auth_secret,omitempty" json:"smtp_auth_secret,omitempty"` - SMTPAuthIdentity string `yaml:"smtp_auth_identity,omitempty" json:"smtp_auth_identity,omitempty"` - SMTPRequireTLS bool `yaml:"smtp_require_tls" json:"smtp_require_tls,omitempty"` - SlackAPIURL *SecretURL `yaml:"slack_api_url,omitempty" json:"slack_api_url,omitempty"` - SlackAPIURLFile string `yaml:"slack_api_url_file,omitempty" json:"slack_api_url_file,omitempty"` - PagerdutyURL *URL `yaml:"pagerduty_url,omitempty" json:"pagerduty_url,omitempty"` - OpsGenieAPIURL *URL `yaml:"opsgenie_api_url,omitempty" json:"opsgenie_api_url,omitempty"` - OpsGenieAPIKey Secret `yaml:"opsgenie_api_key,omitempty" json:"opsgenie_api_key,omitempty"` - OpsGenieAPIKeyFile string `yaml:"opsgenie_api_key_file,omitempty" json:"opsgenie_api_key_file,omitempty"` - WeChatAPIURL *URL `yaml:"wechat_api_url,omitempty" json:"wechat_api_url,omitempty"` - WeChatAPISecret Secret `yaml:"wechat_api_secret,omitempty" json:"wechat_api_secret,omitempty"` - WeChatAPICorpID string `yaml:"wechat_api_corp_id,omitempty" json:"wechat_api_corp_id,omitempty"` - VictorOpsAPIURL *URL `yaml:"victorops_api_url,omitempty" json:"victorops_api_url,omitempty"` - VictorOpsAPIKey Secret `yaml:"victorops_api_key,omitempty" json:"victorops_api_key,omitempty"` - TelegramAPIUrl *URL `yaml:"telegram_api_url,omitempty" json:"telegram_api_url,omitempty"` + SMTPFrom string `yaml:"smtp_from,omitempty" json:"smtp_from,omitempty"` + SMTPHello string `yaml:"smtp_hello,omitempty" json:"smtp_hello,omitempty"` + SMTPSmarthost HostPort `yaml:"smtp_smarthost,omitempty" json:"smtp_smarthost,omitempty"` + SMTPAuthUsername string `yaml:"smtp_auth_username,omitempty" json:"smtp_auth_username,omitempty"` + SMTPAuthPassword Secret `yaml:"smtp_auth_password,omitempty" json:"smtp_auth_password,omitempty"` + SMTPAuthPasswordFile string `yaml:"smtp_auth_password_file,omitempty" json:"smtp_auth_password_file,omitempty"` + SMTPAuthSecret Secret `yaml:"smtp_auth_secret,omitempty" json:"smtp_auth_secret,omitempty"` + SMTPAuthIdentity string `yaml:"smtp_auth_identity,omitempty" json:"smtp_auth_identity,omitempty"` + SMTPRequireTLS bool `yaml:"smtp_require_tls" json:"smtp_require_tls,omitempty"` + SlackAPIURL *SecretURL `yaml:"slack_api_url,omitempty" json:"slack_api_url,omitempty"` + SlackAPIURLFile 
string `yaml:"slack_api_url_file,omitempty" json:"slack_api_url_file,omitempty"` + PagerdutyURL *URL `yaml:"pagerduty_url,omitempty" json:"pagerduty_url,omitempty"` + OpsGenieAPIURL *URL `yaml:"opsgenie_api_url,omitempty" json:"opsgenie_api_url,omitempty"` + OpsGenieAPIKey Secret `yaml:"opsgenie_api_key,omitempty" json:"opsgenie_api_key,omitempty"` + OpsGenieAPIKeyFile string `yaml:"opsgenie_api_key_file,omitempty" json:"opsgenie_api_key_file,omitempty"` + WeChatAPIURL *URL `yaml:"wechat_api_url,omitempty" json:"wechat_api_url,omitempty"` + WeChatAPISecret Secret `yaml:"wechat_api_secret,omitempty" json:"wechat_api_secret,omitempty"` + WeChatAPICorpID string `yaml:"wechat_api_corp_id,omitempty" json:"wechat_api_corp_id,omitempty"` + VictorOpsAPIURL *URL `yaml:"victorops_api_url,omitempty" json:"victorops_api_url,omitempty"` + VictorOpsAPIKey Secret `yaml:"victorops_api_key,omitempty" json:"victorops_api_key,omitempty"` + VictorOpsAPIKeyFile string `yaml:"victorops_api_key_file,omitempty" json:"victorops_api_key_file,omitempty"` + TelegramAPIUrl *URL `yaml:"telegram_api_url,omitempty" json:"telegram_api_url,omitempty"` + WebexAPIURL *URL `yaml:"webex_api_url,omitempty" json:"webex_api_url,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for GlobalConfig. @@ -844,6 +884,7 @@ type Receiver struct { // A unique identifier for this receiver. Name string `yaml:"name" json:"name"` + DiscordConfigs []*DiscordConfig `yaml:"discord_configs,omitempty" json:"discord_configs,omitempty"` EmailConfigs []*EmailConfig `yaml:"email_configs,omitempty" json:"email_configs,omitempty"` PagerdutyConfigs []*PagerdutyConfig `yaml:"pagerduty_configs,omitempty" json:"pagerduty_configs,omitempty"` SlackConfigs []*SlackConfig `yaml:"slack_configs,omitempty" json:"slack_configs,omitempty"` @@ -854,6 +895,7 @@ type Receiver struct { VictorOpsConfigs []*VictorOpsConfig `yaml:"victorops_configs,omitempty" json:"victorops_configs,omitempty"` SNSConfigs []*SNSConfig `yaml:"sns_configs,omitempty" json:"sns_configs,omitempty"` TelegramConfigs []*TelegramConfig `yaml:"telegram_configs,omitempty" json:"telegram_configs,omitempty"` + WebexConfigs []*WebexConfig `yaml:"webex_configs,omitempty" json:"webex_configs,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface for Receiver. diff --git a/vendor/github.com/prometheus/alertmanager/config/coordinator.go b/vendor/github.com/prometheus/alertmanager/config/coordinator.go index 499e939ec..5ee44f81d 100644 --- a/vendor/github.com/prometheus/alertmanager/config/coordinator.go +++ b/vendor/github.com/prometheus/alertmanager/config/coordinator.go @@ -150,7 +150,7 @@ func md5HashAsMetricValue(data []byte) float64 { sum := md5.Sum(data) // We only want 48 bits as a float64 only has a 53 bit mantissa. 
smallSum := sum[0:6] - var bytes = make([]byte, 8) + bytes := make([]byte, 8) copy(bytes, smallSum) return float64(binary.LittleEndian.Uint64(bytes)) } diff --git a/vendor/github.com/prometheus/alertmanager/config/notifiers.go b/vendor/github.com/prometheus/alertmanager/config/notifiers.go index a2cadf9e6..e0abdcd57 100644 --- a/vendor/github.com/prometheus/alertmanager/config/notifiers.go +++ b/vendor/github.com/prometheus/alertmanager/config/notifiers.go @@ -15,12 +15,13 @@ package config import ( "fmt" + "net/textproto" "regexp" "strings" + "text/template" "time" "github.com/pkg/errors" - commoncfg "github.com/prometheus/common/config" "github.com/prometheus/common/sigv4" ) @@ -33,6 +34,23 @@ var ( }, } + // DefaultWebexConfig defines default values for Webex configurations. + DefaultWebexConfig = WebexConfig{ + NotifierConfig: NotifierConfig{ + VSendResolved: true, + }, + Message: `{{ template "webex.default.message" . }}`, + } + + // DefaultDiscordConfig defines default values for Discord configurations. + DefaultDiscordConfig = DiscordConfig{ + NotifierConfig: NotifierConfig{ + VSendResolved: true, + }, + Title: `{{ template "discord.default.title" . }}`, + Message: `{{ template "discord.default.message" . }}`, + } + // DefaultEmailConfig defines default values for Email configurations. DefaultEmailConfig = EmailConfig{ NotifierConfig: NotifierConfig{ @@ -144,7 +162,7 @@ var ( }, DisableNotifications: false, Message: `{{ template "telegram.default.message" . }}`, - ParseMode: "MarkdownV2", + ParseMode: "HTML", } ) @@ -157,24 +175,72 @@ func (nc *NotifierConfig) SendResolved() bool { return nc.VSendResolved } +// WebexConfig configures notifications via Webex. +type WebexConfig struct { + NotifierConfig `yaml:",inline" json:",inline"` + HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` + APIURL *URL `yaml:"api_url,omitempty" json:"api_url,omitempty"` + + Message string `yaml:"message,omitempty" json:"message,omitempty"` + RoomID string `yaml:"room_id" json:"room_id"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *WebexConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultWebexConfig + type plain WebexConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + if c.RoomID == "" { + return fmt.Errorf("missing room_id on webex_config") + } + + if c.HTTPConfig == nil || c.HTTPConfig.Authorization == nil { + return fmt.Errorf("missing webex_configs.http_config.authorization") + } + + return nil +} + +// DiscordConfig configures notifications via Discord. +type DiscordConfig struct { + NotifierConfig `yaml:",inline" json:",inline"` + + HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` + WebhookURL *SecretURL `yaml:"webhook_url,omitempty" json:"webhook_url,omitempty"` + + Title string `yaml:"title,omitempty" json:"title,omitempty"` + Message string `yaml:"message,omitempty" json:"message,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *DiscordConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultDiscordConfig + type plain DiscordConfig + return unmarshal((*plain)(c)) +} + // EmailConfig configures notifications via mail. type EmailConfig struct { NotifierConfig `yaml:",inline" json:",inline"` // Email address to notify. 
- To string `yaml:"to,omitempty" json:"to,omitempty"` - From string `yaml:"from,omitempty" json:"from,omitempty"` - Hello string `yaml:"hello,omitempty" json:"hello,omitempty"` - Smarthost HostPort `yaml:"smarthost,omitempty" json:"smarthost,omitempty"` - AuthUsername string `yaml:"auth_username,omitempty" json:"auth_username,omitempty"` - AuthPassword Secret `yaml:"auth_password,omitempty" json:"auth_password,omitempty"` - AuthSecret Secret `yaml:"auth_secret,omitempty" json:"auth_secret,omitempty"` - AuthIdentity string `yaml:"auth_identity,omitempty" json:"auth_identity,omitempty"` - Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"` - HTML string `yaml:"html,omitempty" json:"html,omitempty"` - Text string `yaml:"text,omitempty" json:"text,omitempty"` - RequireTLS *bool `yaml:"require_tls,omitempty" json:"require_tls,omitempty"` - TLSConfig commoncfg.TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` + To string `yaml:"to,omitempty" json:"to,omitempty"` + From string `yaml:"from,omitempty" json:"from,omitempty"` + Hello string `yaml:"hello,omitempty" json:"hello,omitempty"` + Smarthost HostPort `yaml:"smarthost,omitempty" json:"smarthost,omitempty"` + AuthUsername string `yaml:"auth_username,omitempty" json:"auth_username,omitempty"` + AuthPassword Secret `yaml:"auth_password,omitempty" json:"auth_password,omitempty"` + AuthPasswordFile string `yaml:"auth_password_file,omitempty" json:"auth_password_file,omitempty"` + AuthSecret Secret `yaml:"auth_secret,omitempty" json:"auth_secret,omitempty"` + AuthIdentity string `yaml:"auth_identity,omitempty" json:"auth_identity,omitempty"` + Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"` + HTML string `yaml:"html,omitempty" json:"html,omitempty"` + Text string `yaml:"text,omitempty" json:"text,omitempty"` + RequireTLS *bool `yaml:"require_tls,omitempty" json:"require_tls,omitempty"` + TLSConfig commoncfg.TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -190,7 +256,7 @@ func (c *EmailConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // Header names are case-insensitive, check for collisions. 
normalizedHeaders := map[string]string{} for h, v := range c.Headers { - normalized := strings.Title(h) + normalized := textproto.CanonicalMIMEHeaderKey(h) if _, ok := normalizedHeaders[normalized]; ok { return fmt.Errorf("duplicate header %q in email config", normalized) } @@ -207,19 +273,22 @@ type PagerdutyConfig struct { HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` - ServiceKey Secret `yaml:"service_key,omitempty" json:"service_key,omitempty"` - RoutingKey Secret `yaml:"routing_key,omitempty" json:"routing_key,omitempty"` - URL *URL `yaml:"url,omitempty" json:"url,omitempty"` - Client string `yaml:"client,omitempty" json:"client,omitempty"` - ClientURL string `yaml:"client_url,omitempty" json:"client_url,omitempty"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` - Details map[string]string `yaml:"details,omitempty" json:"details,omitempty"` - Images []PagerdutyImage `yaml:"images,omitempty" json:"images,omitempty"` - Links []PagerdutyLink `yaml:"links,omitempty" json:"links,omitempty"` - Severity string `yaml:"severity,omitempty" json:"severity,omitempty"` - Class string `yaml:"class,omitempty" json:"class,omitempty"` - Component string `yaml:"component,omitempty" json:"component,omitempty"` - Group string `yaml:"group,omitempty" json:"group,omitempty"` + ServiceKey Secret `yaml:"service_key,omitempty" json:"service_key,omitempty"` + ServiceKeyFile string `yaml:"service_key_file,omitempty" json:"service_key_file,omitempty"` + RoutingKey Secret `yaml:"routing_key,omitempty" json:"routing_key,omitempty"` + RoutingKeyFile string `yaml:"routing_key_file,omitempty" json:"routing_key_file,omitempty"` + URL *URL `yaml:"url,omitempty" json:"url,omitempty"` + Client string `yaml:"client,omitempty" json:"client,omitempty"` + ClientURL string `yaml:"client_url,omitempty" json:"client_url,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Details map[string]string `yaml:"details,omitempty" json:"details,omitempty"` + Images []PagerdutyImage `yaml:"images,omitempty" json:"images,omitempty"` + Links []PagerdutyLink `yaml:"links,omitempty" json:"links,omitempty"` + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Severity string `yaml:"severity,omitempty" json:"severity,omitempty"` + Class string `yaml:"class,omitempty" json:"class,omitempty"` + Component string `yaml:"component,omitempty" json:"component,omitempty"` + Group string `yaml:"group,omitempty" json:"group,omitempty"` } // PagerdutyLink is a link @@ -242,12 +311,21 @@ func (c *PagerdutyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error if err := unmarshal((*plain)(c)); err != nil { return err } - if c.RoutingKey == "" && c.ServiceKey == "" { + if c.RoutingKey == "" && c.ServiceKey == "" && c.RoutingKeyFile == "" && c.ServiceKeyFile == "" { return fmt.Errorf("missing service or routing key in PagerDuty config") } + if len(c.RoutingKey) > 0 && len(c.RoutingKeyFile) > 0 { + return fmt.Errorf("at most one of routing_key & routing_key_file must be configured") + } + if len(c.ServiceKey) > 0 && len(c.ServiceKeyFile) > 0 { + return fmt.Errorf("at most one of service_key & service_key_file must be configured") + } if c.Details == nil { c.Details = make(map[string]string) } + if c.Source == "" { + c.Source = c.Client + } for k, v := range DefaultPagerdutyDetails { if _, ok := c.Details[k]; !ok { c.Details[k] = v @@ -452,7 +530,7 @@ func (c *WechatConfig) UnmarshalYAML(unmarshal 
func(interface{}) error) error { } if !wechatTypeMatcher.MatchString(c.MessageType) { - return errors.Errorf("WeChat message type %q does not match valid options %s", c.MessageType, wechatValidTypesRe) + return errors.Errorf("weChat message type %q does not match valid options %s", c.MessageType, wechatValidTypesRe) } return nil @@ -492,18 +570,25 @@ func (c *OpsGenieConfig) UnmarshalYAML(unmarshal func(interface{}) error) error return err } - if c.APIURL != nil && len(c.APIKeyFile) > 0 { + if c.APIKey != "" && len(c.APIKeyFile) > 0 { return fmt.Errorf("at most one of api_key & api_key_file must be configured") } for _, r := range c.Responders { if r.ID == "" && r.Username == "" && r.Name == "" { - return errors.Errorf("OpsGenieConfig responder %v has to have at least one of id, username or name specified", r) + return errors.Errorf("opsGenieConfig responder %v has to have at least one of id, username or name specified", r) } - r.Type = strings.ToLower(r.Type) - if !opsgenieTypeMatcher.MatchString(r.Type) { - return errors.Errorf("OpsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) + if strings.Contains(r.Type, "{{") { + _, err := template.New("").Parse(r.Type) + if err != nil { + return errors.Errorf("opsGenieConfig responder %v type is not a valid template: %v", r, err) + } + } else { + r.Type = strings.ToLower(r.Type) + if !opsgenieTypeMatcher.MatchString(r.Type) { + return errors.Errorf("opsGenieConfig responder %v type does not match valid options %s", r, opsgenieValidTypesRe) + } } } @@ -527,7 +612,7 @@ type VictorOpsConfig struct { HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` APIKey Secret `yaml:"api_key,omitempty" json:"api_key,omitempty"` - APIKeyFile Secret `yaml:"api_key_file,omitempty" json:"api_key_file,omitempty"` + APIKeyFile string `yaml:"api_key_file,omitempty" json:"api_key_file,omitempty"` APIURL *URL `yaml:"api_url" json:"api_url"` RoutingKey string `yaml:"routing_key" json:"routing_key"` MessageType string `yaml:"message_type" json:"message_type"` @@ -547,12 +632,15 @@ func (c *VictorOpsConfig) UnmarshalYAML(unmarshal func(interface{}) error) error if c.RoutingKey == "" { return fmt.Errorf("missing Routing key in VictorOps config") } + if c.APIKey != "" && len(c.APIKeyFile) > 0 { + return fmt.Errorf("at most one of api_key & api_key_file must be configured") + } reservedFields := []string{"routing_key", "message_type", "state_message", "entity_display_name", "monitoring_tool", "entity_id", "entity_state"} for _, v := range reservedFields { if _, ok := c.CustomFields[v]; ok { - return fmt.Errorf("VictorOps config contains custom field %s which cannot be used as it conflicts with the fixed/static fields", v) + return fmt.Errorf("victorOps config contains custom field %s which cannot be used as it conflicts with the fixed/static fields", v) } } @@ -662,9 +750,6 @@ func (c *TelegramConfig) UnmarshalYAML(unmarshal func(interface{}) error) error if c.ChatID == 0 { return fmt.Errorf("missing chat_id on telegram_config") } - if c.APIUrl == nil { - return fmt.Errorf("missing api_url on telegram_config") - } if c.ParseMode != "" && c.ParseMode != "Markdown" && c.ParseMode != "MarkdownV2" && diff --git a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go index 0a242d506..a125d59d8 100644 --- a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go +++ 
b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go @@ -45,11 +45,12 @@ var ( // this comma and whitespace will be trimmed. // // Examples for valid input strings: -// {foo = "bar", dings != "bums", } -// foo=bar,dings!=bums -// foo=bar, dings!=bums -// {quote="She said: \"Hi, ladies! That's gender-neutral…\""} -// statuscode=~"5.." +// +// {foo = "bar", dings != "bums", } +// foo=bar,dings!=bums +// foo=bar, dings!=bums +// {quote="She said: \"Hi, ladies! That's gender-neutral…\""} +// statuscode=~"5.." // // See ParseMatcher for details on how an individual Matcher is parsed. func ParseMatchers(s string) ([]*Matcher, error) { @@ -127,7 +128,7 @@ func ParseMatcher(s string) (_ *Matcher, err error) { expectTrailingQuote bool ) - if rawValue[0] == '"' { + if strings.HasPrefix(rawValue, "\"") { rawValue = strings.TrimPrefix(rawValue, "\"") expectTrailingQuote = true } diff --git a/vendor/github.com/prometheus/alertmanager/template/default.tmpl b/vendor/github.com/prometheus/alertmanager/template/default.tmpl index 4ce5de708..82626b1a4 100644 --- a/vendor/github.com/prometheus/alertmanager/template/default.tmpl +++ b/vendor/github.com/prometheus/alertmanager/template/default.tmpl @@ -111,4 +111,27 @@ Alerts Firing: Alerts Resolved: {{ template "__text_alert_list" .Alerts.Resolved }} {{ end }} -{{ end }} \ No newline at end of file +{{ end }} + +{{ define "discord.default.title" }}{{ template "__subject" . }}{{ end }} +{{ define "discord.default.message" }} +{{ if gt (len .Alerts.Firing) 0 }} +Alerts Firing: +{{ template "__text_alert_list" .Alerts.Firing }} +{{ end }} +{{ if gt (len .Alerts.Resolved) 0 }} +Alerts Resolved: +{{ template "__text_alert_list" .Alerts.Resolved }} +{{ end }} +{{ end }} + +{{ define "webex.default.message" }}{{ .CommonAnnotations.SortedPairs.Values | join " " }} +{{ if gt (len .Alerts.Firing) 0 }} +Alerts Firing: +{{ template "__text_alert_list" .Alerts.Firing }} +{{ end }} +{{ if gt (len .Alerts.Resolved) 0 }} +Alerts Resolved: +{{ template "__text_alert_list" .Alerts.Resolved }} +{{ end }} +{{ end }} diff --git a/vendor/github.com/prometheus/alertmanager/template/email.html b/vendor/github.com/prometheus/alertmanager/template/email.html index 1f15ab8b4..4bd42f95b 100644 --- a/vendor/github.com/prometheus/alertmanager/template/email.html +++ b/vendor/github.com/prometheus/alertmanager/template/email.html @@ -325,13 +325,17 @@
{{ if gt (len .Alerts.Firing) 0 }} {{ else }} + {{ end }} {{ if gt (len .Alerts.Firing) 0 }} {{ else }} - + {{ end }}
+ {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} + {{ .Name }}={{ .Value }} + {{ end }} + - {{ end }} {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} {{ .Name }}={{ .Value }} {{ end }}
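As a sketch of the new receiver wiring above: the config.go and notifiers.go hunks add discord_configs and webex_configs receivers plus *_file variants for several secrets. The following is a minimal illustration of feeding the new fields through config.Load, assuming this vendored alertmanager API; the receiver name, webhook URL, room ID, and token are invented placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/alertmanager/config"
)

func main() {
	// Invented example values, for illustration only.
	raw := `
route:
  receiver: team
receivers:
  - name: team
    discord_configs:
      - webhook_url: https://discord.com/api/webhooks/123/abc
    webex_configs:
      - room_id: ROOM123
        http_config:
          authorization:
            credentials: secret-token
`
	cfg, err := config.Load(raw)
	if err != nil {
		log.Fatal(err) // e.g. "missing room_id on webex_config" when room_id is omitted
	}
	// With no api_url set, the webex receiver falls back to the new global
	// default, https://webexapis.com/v1/messages.
	fmt.Println(cfg.Receivers[0].WebexConfigs[0].APIURL)
}
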
diff --git a/vendor/github.com/prometheus/alertmanager/template/email.tmpl b/vendor/github.com/prometheus/alertmanager/template/email.tmpl index 63a76525f..7477a304c 100644 --- a/vendor/github.com/prometheus/alertmanager/template/email.tmpl +++ b/vendor/github.com/prometheus/alertmanager/template/email.tmpl @@ -90,13 +90,17 @@ h4 {
+ {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} + {{ .Name }}={{ .Value }} + {{ end }} + - {{ end }} + {{ .Alerts | len }} alert{{ if gt (len .Alerts) 1 }}s{{ end }} for {{ range .GroupLabels.SortedPairs }} {{ .Name }}={{ .Value }} {{ end }}
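The template.go hunk below swaps the deprecated strings.Title for golang.org/x/text/cases in the template FuncMap. A small sketch of the replacement in isolation; the sample string is invented.

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// cases.Title returns a Unicode-aware Caser; strings.Title has been
	// deprecated since Go 1.18 because its word-boundary rule does not
	// handle Unicode punctuation properly.
	title := cases.Title(language.AmericanEnglish).String
	fmt.Println(title("critical alerts firing")) // Critical Alerts Firing
}
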
diff --git a/vendor/github.com/prometheus/alertmanager/template/template.go b/vendor/github.com/prometheus/alertmanager/template/template.go index 2c1b62db3..3882187f3 100644 --- a/vendor/github.com/prometheus/alertmanager/template/template.go +++ b/vendor/github.com/prometheus/alertmanager/template/template.go @@ -16,7 +16,7 @@ package template import ( "bytes" tmplhtml "html/template" - "io/ioutil" + "io" "net/url" "path" "path/filepath" @@ -27,6 +27,8 @@ import ( "time" "github.com/prometheus/common/model" + "golang.org/x/text/cases" + "golang.org/x/text/language" "github.com/prometheus/alertmanager/asset" "github.com/prometheus/alertmanager/types" @@ -59,7 +61,7 @@ func FromGlobs(paths ...string) (*Template, error) { return nil, err } defer f.Close() - b, err := ioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { return nil, err } @@ -132,7 +134,7 @@ type FuncMap map[string]interface{} var DefaultFuncs = FuncMap{ "toUpper": strings.ToUpper, "toLower": strings.ToLower, - "title": strings.Title, + "title": cases.Title(language.AmericanEnglish).String, // join is equal to strings.Join but inverts the argument order // for easier pipelining in templates. "join": func(sep string, s []string) string { diff --git a/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go b/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go index 24c4a4ea0..a5018aaef 100644 --- a/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go +++ b/vendor/github.com/prometheus/alertmanager/timeinterval/timeinterval.go @@ -17,7 +17,9 @@ import ( "encoding/json" "errors" "fmt" + "os" "regexp" + "runtime" "strconv" "strings" "time" @@ -33,6 +35,7 @@ type TimeInterval struct { DaysOfMonth []DayOfMonthRange `yaml:"days_of_month,flow,omitempty" json:"days_of_month,omitempty"` Months []MonthRange `yaml:"months,flow,omitempty" json:"months,omitempty"` Years []YearRange `yaml:"years,flow,omitempty" json:"years,omitempty"` + Location *Location `yaml:"location,flow,omitempty" json:"location,omitempty"` } // TimeRange represents a range of minutes within a 1440 minute day, exclusive of the End minute. A day consists of 1440 minutes. @@ -68,6 +71,11 @@ type YearRange struct { InclusiveRange } +// A Location is a container for a time.Location, used for custom unmarshalling/validation logic. +type Location struct { + *time.Location +} + type yamlTimeRange struct { StartTime string `yaml:"start_time" json:"start_time"` EndTime string `yaml:"end_time" json:"end_time"` @@ -125,6 +133,7 @@ var daysOfWeek = map[string]int{ "friday": 5, "saturday": 6, } + var daysOfWeekInv = map[int]string{ 0: "sunday", 1: "monday", @@ -165,6 +174,34 @@ var monthsInv = map[int]string{ 12: "december", } +// UnmarshalYAML implements the Unmarshaller interface for Location. +func (tz *Location) UnmarshalYAML(unmarshal func(interface{}) error) error { + var str string + if err := unmarshal(&str); err != nil { + return err + } + + loc, err := time.LoadLocation(str) + if err != nil { + if runtime.GOOS == "windows" { + if zoneinfo := os.Getenv("ZONEINFO"); zoneinfo != "" { + return fmt.Errorf("%w (ZONEINFO=%q)", err, zoneinfo) + } + return fmt.Errorf("%w (on Windows platforms, you may have to pass the time zone database using the ZONEINFO environment variable, see https://pkg.go.dev/time#LoadLocation for details)", err) + } + return err + } + + *tz = Location{loc} + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Location. 
+// It delegates to the YAML unmarshaller as it can parse JSON and has validation logic. +func (tz *Location) UnmarshalJSON(in []byte) error { + return yaml.Unmarshal(in, tz) +} + // UnmarshalYAML implements the Unmarshaller interface for WeekdayRange. func (r *WeekdayRange) UnmarshalYAML(unmarshal func(interface{}) error) error { var str string @@ -316,7 +353,7 @@ func (r WeekdayRange) MarshalYAML() (interface{}, error) { } // MarshalText implements the econding.TextMarshaler interface for WeekdayRange. -// It converts the range into a colon-seperated string, or a single weekday if possible. +// It converts the range into a colon-separated string, or a single weekday if possible. // e.g. "monday:friday" or "saturday". func (r WeekdayRange) MarshalText() ([]byte, error) { beginStr, ok := daysOfWeekInv[r.Begin] @@ -362,6 +399,26 @@ func (tr TimeRange) MarshalJSON() (out []byte, err error) { return json.Marshal(yTr) } +// MarshalText implements the econding.TextMarshaler interface for Location. +// It marshals a Location back into a string that represents a time.Location. +func (tz Location) MarshalText() ([]byte, error) { + if tz.Location == nil { + return nil, fmt.Errorf("unable to convert nil location into string") + } + return []byte(tz.Location.String()), nil +} + +// MarshalYAML implements the yaml.Marshaler interface for Location. +func (tz Location) MarshalYAML() (interface{}, error) { + bytes, err := tz.MarshalText() + return string(bytes), err +} + +// MarshalJSON implements the json.Marshaler interface for Location. +func (tz Location) MarshalJSON() (out []byte, err error) { + return json.Marshal(tz.String()) +} + // MarshalText implements the encoding.TextMarshaler interface for InclusiveRange. // It converts the struct into a colon-separated string, or a single element if // appropriate. e.g. "monday:friday" or "monday" @@ -373,7 +430,7 @@ func (ir InclusiveRange) MarshalText() ([]byte, error) { return []byte(out), nil } -//MarshalYAML implements the yaml.Marshaler interface for InclusiveRange. +// MarshalYAML implements the yaml.Marshaler interface for InclusiveRange. func (ir InclusiveRange) MarshalYAML() (interface{}, error) { bytes, err := ir.MarshalText() return string(bytes), err @@ -382,8 +439,10 @@ func (ir InclusiveRange) MarshalYAML() (interface{}, error) { // TimeLayout specifies the layout to be used in time.Parse() calls for time intervals. const TimeLayout = "15:04" -var validTime string = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" -var validTimeRE *regexp.Regexp = regexp.MustCompile(validTime) +var ( + validTime = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)" + validTimeRE = regexp.MustCompile(validTime) +) // Given a time, determines the number of days in the month that time occurs in. func daysInMonth(t time.Time) int { @@ -405,6 +464,9 @@ func clamp(n, min, max int) int { // ContainsTime returns true if the TimeInterval contains the given time, otherwise returns false. 
func (tp TimeInterval) ContainsTime(t time.Time) bool { + if tp.Location != nil { + t = t.In(tp.Location.Location) + } if tp.Times != nil { in := false for _, validMinutes := range tp.Times { diff --git a/vendor/github.com/prometheus/alertmanager/types/types.go b/vendor/github.com/prometheus/alertmanager/types/types.go index a412453d3..f3101f823 100644 --- a/vendor/github.com/prometheus/alertmanager/types/types.go +++ b/vendor/github.com/prometheus/alertmanager/types/types.go @@ -18,9 +18,10 @@ import ( "sync" "time" - "github.com/prometheus/alertmanager/pkg/labels" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + + "github.com/prometheus/alertmanager/pkg/labels" ) // AlertState is used as part of AlertStatus. @@ -52,18 +53,18 @@ type AlertStatus struct { // Marker helps to mark alerts as silenced and/or inhibited. // All methods are goroutine-safe. type Marker interface { - // SetSilenced replaces the previous SilencedBy by the provided IDs of + // SetActiveOrSilenced replaces the previous SilencedBy by the provided IDs of // active and pending silences, including the version number of the // silences state. The set of provided IDs is supposed to represent the // complete set of relevant silences. If no active silence IDs are provided and // InhibitedBy is already empty, it sets the provided alert to AlertStateActive. // Otherwise, it sets the provided alert to AlertStateSuppressed. - SetSilenced(alert model.Fingerprint, version int, activeSilenceIDs []string, pendingSilenceIDs []string) + SetActiveOrSilenced(alert model.Fingerprint, version int, activeSilenceIDs, pendingSilenceIDs []string) // SetInhibited replaces the previous InhibitedBy by the provided IDs of - // alerts. In contrast to SetSilenced, the set of provided IDs is not + // alerts. In contrast to SetActiveOrSilenced, the set of provided IDs is not // expected to represent the complete set of inhibiting alerts. (In // practice, this method is only called with one or zero IDs. However, - // this expectation might change in the future.) If no IDs are provided + // this expectation might change in the future. If no IDs are provided // and InhibitedBy is already empty, it sets the provided alert to // AlertStateActive. Otherwise, it sets the provided alert to // AlertStateSuppressed. @@ -85,7 +86,7 @@ type Marker interface { // result is based on. 
Unprocessed(model.Fingerprint) bool Active(model.Fingerprint) bool - Silenced(model.Fingerprint) (activeIDs []string, pendingIDs []string, version int, silenced bool) + Silenced(model.Fingerprint) (activeIDs, pendingIDs []string, version int, silenced bool) Inhibited(model.Fingerprint) ([]string, bool) } @@ -107,11 +108,11 @@ type memMarker struct { } func (m *memMarker) registerMetrics(r prometheus.Registerer) { - newAlertMetricByState := func(st AlertState) prometheus.GaugeFunc { + newMarkedAlertMetricByState := func(st AlertState) prometheus.GaugeFunc { return prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Name: "alertmanager_alerts", - Help: "How many alerts by state.", + Name: "alertmanager_marked_alerts", + Help: "How many alerts by state are currently marked in the Alertmanager regardless of their expiry.", ConstLabels: prometheus.Labels{"state": string(st)}, }, func() float64 { @@ -120,11 +121,13 @@ func (m *memMarker) registerMetrics(r prometheus.Registerer) { ) } - alertsActive := newAlertMetricByState(AlertStateActive) - alertsSuppressed := newAlertMetricByState(AlertStateSuppressed) + alertsActive := newMarkedAlertMetricByState(AlertStateActive) + alertsSuppressed := newMarkedAlertMetricByState(AlertStateSuppressed) + alertStateUnprocessed := newMarkedAlertMetricByState(AlertStateUnprocessed) r.MustRegister(alertsActive) r.MustRegister(alertsSuppressed) + r.MustRegister(alertStateUnprocessed) } // Count implements Marker. @@ -147,8 +150,8 @@ func (m *memMarker) Count(states ...AlertState) int { return count } -// SetSilenced implements Marker. -func (m *memMarker) SetSilenced(alert model.Fingerprint, version int, activeIDs []string, pendingIDs []string) { +// SetActiveOrSilenced implements Marker. +func (m *memMarker) SetActiveOrSilenced(alert model.Fingerprint, version int, activeIDs, pendingIDs []string) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,7 @@ func (m *memMarker) Inhibited(alert model.Fingerprint) ([]string, bool) { // Silenced returns whether the alert for the given Fingerprint is in the // Silenced state, any associated silence IDs, and the silences state version // the result is based on. -func (m *memMarker) Silenced(alert model.Fingerprint) (activeIDs []string, pendingIDs []string, version int, silenced bool) { +func (m *memMarker) Silenced(alert model.Fingerprint) (activeIDs, pendingIDs []string, version int, silenced bool) { s := m.Status(alert) return s.SilencedBy, s.pendingSilences, s.silencesVersion, s.State == AlertStateSuppressed && len(s.SilencedBy) > 0 diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 5f0ecef29..f74139c71 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -335,14 +335,15 @@ type RuleGroup struct { // that rules are returned in by the API. // // Rule types can be determined using a type switch: -// switch v := rule.(type) { -// case RecordingRule: -// fmt.Print("got a recording rule") -// case AlertingRule: -// fmt.Print("got a alerting rule") -// default: -// fmt.Printf("unknown rule type %s", v) -// } +// +// switch v := rule.(type) { +// case RecordingRule: +// fmt.Print("got a recording rule") +// case AlertingRule: +// fmt.Print("got a alerting rule") +// default: +// fmt.Printf("unknown rule type %s", v) +// } type Rules []interface{} // AlertingRule models a alerting rule. 
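The timeinterval.go hunks above add an optional location to time intervals and convert the probe time inside ContainsTime. A minimal sketch of the effect, assuming this vendored package and gopkg.in/yaml.v2; the zone and date are arbitrary examples.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/alertmanager/timeinterval"
	"gopkg.in/yaml.v2"
)

func main() {
	var ti timeinterval.TimeInterval
	raw := `
location: Australia/Sydney
weekdays: ['monday:friday']
`
	// Needs a tz database at runtime; see the ZONEINFO note for Windows above.
	if err := yaml.Unmarshal([]byte(raw), &ti); err != nil {
		panic(err)
	}
	// 23:00 UTC on Friday 2022-07-01 is 09:00 Saturday in Sydney, so after
	// the conversion added to ContainsTime the interval no longer matches.
	t := time.Date(2022, time.July, 1, 23, 0, 0, 0, time.UTC)
	fmt.Println(ti.ContainsTime(t)) // false
}
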
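The types.go hunks above rename SetSilenced to SetActiveOrSilenced and expose the marker gauges as alertmanager_marked_alerts, now including the unprocessed state. A rough sketch of observing that metric, assuming types.NewMarker still takes a prometheus.Registerer in this vendored version.

package main

import (
	"fmt"

	"github.com/prometheus/alertmanager/types"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
)

func main() {
	reg := prometheus.NewRegistry()
	m := types.NewMarker(reg) // registers the alertmanager_marked_alerts gauges

	// No active silence IDs and no inhibitions: the alert is marked active.
	m.SetActiveOrSilenced(model.Fingerprint(1), 0, nil, nil)

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // alertmanager_marked_alerts
	}
}
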
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index de30de6da..a912b75a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -140,12 +140,13 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } + val := c.get() return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } @@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d..811072cbd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. 
-// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. // -// Metrics +// # Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example @@ -100,7 +111,7 @@ // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // -// Custom Collectors and constant Metrics +// # Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. At a first @@ -141,7 +152,7 @@ // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // -// Advanced Uses of the Registry +// # Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. @@ -176,23 +187,23 @@ // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // -// HTTP Exposition +// # HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // -// Pushing to the Pushgateway +// # Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. 
// -// Graphite Bridge +// # Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // -// Other Means of Exposition +// # Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index bd0733d6a..21271a5bb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 0d47fecdc..4c873a01c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -28,19 +28,216 @@ import ( dto "github.com/prometheus/client_model/go" ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. 
See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 
0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 
0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 
0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. +// +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +247,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +262,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. 
Most likely, however, you will be required to define buckets
 // customized to your use case.
-var (
-	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
 
-	errBucketLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in histograms", bucketLabel,
-	)
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
 )
 
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is zero or negative.
 func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
 	return buckets
 }
 
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
 // or if 'factor' is less than or equal 1.
@@ -180,8 +388,85 @@ type HistogramOpts struct {
 	// element in the slice is the upper inclusive bound of a bucket. The
 	// values must be sorted in strictly increasing order. There is no need
 	// to add a highest bucket with +Inf bound, it will be added
-	// implicitly. The default value is DefBuckets.
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (for an exception, see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^n), where n is an integer number between (and including) -8 and
+	// 4. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less or equal
+	// NativeHistogramZeroThreshold are accumulated into a “zero”
+	// bucket. For best results, this should be close to a bucket
+	// boundary. This is usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To
+	// configure a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The remaining fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.)
Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted: First, if the last reset (or the creation) of the histogram
+	// is at least NativeHistogramMinResetDuration ago, then the whole
+	// histogram is reset to its initial state (including regular
+	// buckets). If less time has passed, or if
+	// NativeHistogramMinResetDuration is zero, no reset is
+	// performed. Instead, the zero threshold is increased sufficiently to
+	// reduce the number of buckets to or below
+	// NativeHistogramMaxBucketNumber, but not to more than
+	// NativeHistogramMaxZeroThreshold. Thus, if
+	// NativeHistogramMaxZeroThreshold is already at or below the current
+	// zero threshold, nothing happens at this step. After that, if the
+	// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
+	// resolution of the histogram is reduced by doubling the width of the
+	// sparse buckets (up to a growth factor from one bucket to the next
+	// of 2^(2^4) = 65536, see above).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
 }
 
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		}
 	}
 
-	if len(opts.Buckets) == 0 {
-		opts.Buckets = DefBuckets
-	}
-
 	h := &histogram{
-		desc:        desc,
-		upperBounds: opts.Buckets,
-		labelPairs:  MakeLabelPairs(desc, labelValues),
-		counts:      [2]*histogramCounts{{}, {}},
-		now:         time.Now,
+		desc:                            desc,
+		upperBounds:                     opts.Buckets,
+		labelPairs:                      MakeLabelPairs(desc, labelValues),
+		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
+		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+		lastResetTime:                   time.Now(),
+		now:                             time.Now,
+	}
+	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+		h.upperBounds = DefBuckets
+	}
+	if opts.NativeHistogramBucketFactor <= 1 {
+		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+	} else {
+		switch {
+		case opts.NativeHistogramZeroThreshold > 0:
+			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		case opts.NativeHistogramZeroThreshold == 0:
+			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
 	}
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
@@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// for both counts as well as exemplars:
-	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
-	h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+	h.counts[0] = &histogramCounts{
+		buckets:                          make([]uint64, len(h.upperBounds)),
+		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+		nativeHistogramSchema:            h.nativeHistogramSchema,
+	}
+	h.counts[1] = &histogramCounts{
+		buckets:                          make([]uint64, len(h.upperBounds)),
+		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+		nativeHistogramSchema:            h.nativeHistogramSchema,
+	}
 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 
 	h.init(h) // Init self-collection.
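
For orientation, a minimal sketch (not part of the vendored patch) of how the native-histogram options defined in this hunk combine with regular buckets; the metric name and the concrete option values are illustrative assumptions, not taken from the diff:

	package main

	import (
		"time"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		// Regular buckets must be given explicitly once native-histogram
		// buckets are enabled (see the Buckets doc above).
		h := prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:    "example_request_duration_seconds", // hypothetical name
			Help:    "Example request duration.",
			Buckets: prometheus.DefBuckets,
			// Each sparse bucket is at most 10% wider than the previous one.
			NativeHistogramBucketFactor: 1.1,
			// Cap the number of sparse buckets; once exceeded, the
			// reset/widen/double strategies described above kick in.
			NativeHistogramMaxBucketNumber:  100,
			NativeHistogramMinResetDuration: time.Hour,
		})
		h.Observe(0.042)
	}
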
@@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 }
 
 type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
 	// sumBits contains the bits of the float64 representing the sum of all
-	// observations. sumBits and count have to go first in the struct to
-	// guarantee alignment for atomic operations.
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	// observations.
 	sumBits uint64
 	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less or equal
+	// the current threshold, see next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
 	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be updated,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) } type histogram struct { @@ -276,7 +667,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -284,8 +675,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -293,9 +686,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. 
-	}
+	waitForCooldown(count, coldCounts)
 
 	his := &dto.Histogram{
 		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
 		SampleCount: proto.Uint64(count),
 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
 	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error {
 		}
 		his.Bucket = append(his.Bucket, b)
 	}
-
-	out.Histogram = his
-	out.Label = h.labelPairs
-
-	// Finally add all the cold counts to the new hot counts and reset the cold counts.
-	atomic.AddUint64(&hotCounts.count, count)
-	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
-	for i := range h.upperBounds {
-		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
-		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
+
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
 	}
+	addAndResetCounts(hotCounts, coldCounts)
 	return nil
 }
 
@@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int {
 
 // observe is the implementation for Observe without the findBucket part.
 func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
 	// We increment h.countAndHotIdx so that the counter in the lower
 	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
 	n := atomic.AddUint64(&h.countAndHotIdx, 1)
 	hotCounts := h.counts[n>>63]
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
+	}
+}
 
-	if bucket < len(h.upperBounds) {
-		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
 	}
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+ } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. 
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSchema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. 
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return 8
+	case floor >= 4:
+		return -4
+	default:
+		return -int32(floor)
+	}
+}
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here is a bit of context about the behavior for the last bucket
+	// counting regular numbers (called simply "last bucket" below) and the
+	// bucket counting observations of ±Inf (called "inf bucket" below, with
+	// a key one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+ } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 000000000..1ed5abe74 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. 
+func AlmostEqualFloat64(a, b, epsilon float64) bool { + if a == b { + return true + } + absA := math.Abs(a) + absB := math.Abs(b) + diff := math.Abs(a - b) + if a == 0 || b == 0 || absA+absB < minNormalFloat64 { + return diff < epsilon*minNormalFloat64 + } + return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon +} + +// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. +func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !AlmostEqualFloat64(a[i], b[i], epsilon) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd45cadc0..fd0750f2c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool { // If IsJunk is not defined: // // Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// // and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' +// +// k >= k' +// i <= i' +// and if i == i', j <= j' // // In other words, of all maximal matching blocks, return one that // starts earliest in a, and of all those maximal matching blocks that diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 6eee198fe..c1b8fad36 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,7 +25,8 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f0941f6f0..b5119c504 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. 
b := &dto.Bucket{
-				CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
+				CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
 				UpperBound:      proto.Float64(math.Inf(1)),
 				Exemplar:        e,
 			}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
index f8d50d1f9..8031e8704 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
@@ -14,114 +14,114 @@
 // Package promauto provides alternative constructors for the fundamental
 // Prometheus metric types and their …Vec and …Func variants. The difference to
 // their counterparts in the prometheus package is that the promauto
-// constructors return Collectors that are already registered with a
-// registry. There are two sets of constructors. The constructors in the first
-// set are top-level functions, while the constructors in the other set are
-// methods of the Factory type. The top-level function return Collectors
-// registered with the global registry (prometheus.DefaultRegisterer), while the
-// methods return Collectors registered with the registry the Factory was
-// constructed with. All constructors panic if the registration fails.
+// constructors register the Collectors with a registry before returning them.
+// There are two sets of constructors. The constructors in the first set are
+// top-level functions, while the constructors in the other set are methods of
+// the Factory type. The top-level functions return Collectors registered with
+// the global registry (prometheus.DefaultRegisterer), while the methods return
+// Collectors registered with the registry the Factory was constructed with. All
+// constructors panic if the registration fails.
//
 // The following example is a complete program to create a histogram of normally
 // distributed random numbers from the math/rand package:
 //
-// package main
+//	package main
 //
-// import (
-// "math/rand"
-// "net/http"
+//	import (
+//		"math/rand"
+//		"net/http"
 //
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promauto"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promauto"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
 //
-// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
-// Name: "random_numbers",
-// Help: "A histogram of normally distributed random numbers.",
-// Buckets: prometheus.LinearBuckets(-3, .1, 61),
-// })
+//	var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
+//		Name:    "random_numbers",
+//		Help:    "A histogram of normally distributed random numbers.",
+//		Buckets: prometheus.LinearBuckets(-3, .1, 61),
+//	})
 //
-// func Random() {
-// for {
-// histogram.Observe(rand.NormFloat64())
-// }
-// }
+//	func Random() {
+//		for {
+//			histogram.Observe(rand.NormFloat64())
+//		}
+//	}
 //
-// func main() {
-// go Random()
-// http.Handle("/metrics", promhttp.Handler())
-// http.ListenAndServe(":1971", nil)
-// }
+//	func main() {
+//		go Random()
+//		http.Handle("/metrics", promhttp.Handler())
+//		http.ListenAndServe(":1971", nil)
+//	}
 //
 // Prometheus's version of a minimal hello-world program:
 //
-// package main
+//	package main
 //
-// import (
-// "fmt"
-// "net/http"
+//	import (
+//		"fmt"
+//		"net/http"
 //
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promauto"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promauto"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
 //
-// func main() {
-// http.Handle("/", promhttp.InstrumentHandlerCounter(
-// promauto.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hello_requests_total",
-// Help: "Total number of hello-world requests by HTTP code.",
-// },
-// []string{"code"},
-// ),
-// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-// fmt.Fprint(w, "Hello, world!")
-// }),
-// ))
-// http.Handle("/metrics", promhttp.Handler())
-// http.ListenAndServe(":1971", nil)
-// }
+//	func main() {
+//		http.Handle("/", promhttp.InstrumentHandlerCounter(
+//			promauto.NewCounterVec(
+//				prometheus.CounterOpts{
+//					Name: "hello_requests_total",
+//					Help: "Total number of hello-world requests by HTTP code.",
+//				},
+//				[]string{"code"},
+//			),
+//			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//				fmt.Fprint(w, "Hello, world!")
+//			}),
+//		))
+//		http.Handle("/metrics", promhttp.Handler())
+//		http.ListenAndServe(":1971", nil)
+//	}
 //
 // A Factory is created with the With(prometheus.Registerer) function, which
 // enables two usage patterns.
With(prometheus.Registerer) can be called once per // line: // -// var ( -// reg = prometheus.NewRegistry() -// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = promauto.With(reg).NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) +// var ( +// reg = prometheus.NewRegistry() +// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = promauto.With(reg).NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) // // Or it can be used to create a Factory once to be used multiple times: // -// var ( -// reg = prometheus.NewRegistry() -// factory = promauto.With(reg) -// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = factory.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) +// var ( +// reg = prometheus.NewRegistry() +// factory = promauto.With(reg) +// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// }) +// requestCount = factory.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "http_requests_total", +// Help: "Total number of HTTP requests by status code and method.", +// }, +// []string{"code", "method"}, +// ) +// ) // // This appears very handy. So why are these constructors locked away in a // separate package? 
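The two Factory patterns documented above are declared at package scope. As a minimal sketch of the same idea in constructor form (the Worker type and metric name below are hypothetical, not part of this patch): accepting a prometheus.Registerer lets production code pass prometheus.DefaultRegisterer while tests pass a fresh prometheus.NewRegistry(), sidestepping the duplicate-registration panics the package documentation warns about.

	package worker

	import (
		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promauto"
	)

	// Worker owns its metrics instead of relying on package-level state.
	type Worker struct {
		jobsProcessed prometheus.Counter
	}

	// NewWorker registers its metrics with the supplied Registerer.
	// Like all promauto constructors, Factory.NewCounter panics if
	// registration fails, e.g. on a duplicate metric name.
	func NewWorker(reg prometheus.Registerer) *Worker {
		factory := promauto.With(reg)
		return &Worker{
			jobsProcessed: factory.NewCounter(prometheus.CounterOpts{
				Name: "worker_jobs_processed_total",
				Help: "Total number of jobs processed.",
			}),
		}
	}

In main, NewWorker(prometheus.DefaultRegisterer) wires the counter to the default /metrics handler; in a test, NewWorker(prometheus.NewRegistry()) gives each test case an isolated registry.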
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 097aff2df..210867816 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), 1, rtOpts.getExemplarFn(r.Context()), ) - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() } return resp, err } @@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index bfe500987..cca67a78a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,7 +28,9 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" -func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) { +// observeWithExemplar is a wrapper for [prometheus.ExemplarObserver.ObserveWithExemplar], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. +func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { if labels == nil { obs.Observe(val) return @@ -36,7 +38,9 @@ func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]str obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) } -func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) { +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { if labels == nil { obs.Add(val) return @@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op now := time.Now() next.ServeHTTP(w, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), 1, hOpts.getExemplarFn(r.Context()), @@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - exemplarAdd( + addWithExemplar( counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), 1, hOpts.getExemplarFn(r.Context()), @@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()), @@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), float64(size), hOpts.getExemplarFn(r.Context()), @@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), float64(size), hOpts.getExemplarFn(r.Context()), @@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - exemplarObserve( + observeWithExemplar( obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), float64(d.Written()), hOpts.getExemplarFn(r.Context()), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go index 06dee376e..29f6cd309 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -15,17 +15,17 @@ // builder approach. 
Create a Pusher with New and then add the various options // by using its methods, finally calling Add or Push, like this: // -// // Easy case: -// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() +// // Easy case: +// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() // -// // Complex case: -// push.New("http://example.org/metrics", "my_job"). -// Collector(myCollector1). -// Collector(myCollector2). -// Grouping("zone", "xy"). -// Client(&myHTTPClient). -// BasicAuth("top", "secret"). -// Add() +// // Complex case: +// push.New("http://example.org/metrics", "my_job"). +// Collector(myCollector1). +// Collector(myCollector2). +// Grouping("zone", "xy"). +// Client(&myHTTPClient). +// BasicAuth("top", "secret"). +// Add() // // See the examples section for more detailed examples. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 325f665ff..09e34d307 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. +// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed7c..7bc448a89 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWith would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index 8d5f10523..f28a76f3a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -25,11 +25,12 @@ type Timer struct { // NewTimer creates a new Timer. The provided Observer is used to observe a // duration in seconds. Timer is usually used to time a function call in the // following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2f4930d9d..35904ea19 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto +// source: io/prometheus/client/metrics.proto package io_prometheus_client @@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram".
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 ) var MetricType_name = map[int32]string{ @@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{ 2: "SUMMARY", 3: "UNTYPED", 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", } var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, } func (x MetricType) Enum() *MetricType { @@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { } func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } type LabelPair struct { @@ -82,7 +91,7 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { @@ -128,7 +137,7 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} + return fileDescriptor_d1e5ddb18987a258, []int{1} } func (m *Gauge) XXX_Unmarshal(b []byte) error { @@ -168,7 +177,7 @@ func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} + return fileDescriptor_d1e5ddb18987a258, []int{2} } func (m *Counter) XXX_Unmarshal(b []byte) error { @@ -215,7 +224,7 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} + return fileDescriptor_d1e5ddb18987a258, []int{3} } func (m *Quantile) XXX_Unmarshal(b []byte) error { @@ -263,7 +272,7 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} + return fileDescriptor_d1e5ddb18987a258, []int{4} } func (m *Summary) XXX_Unmarshal(b []byte) error { @@ -316,7 +325,7 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} + return fileDescriptor_d1e5ddb18987a258, []int{5} } func (m *Untyped) XXX_Unmarshal(b []byte) error { @@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 
`protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -357,7 +391,7 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} + return fileDescriptor_d1e5ddb18987a258, []int{6} } func (m *Histogram) XXX_Unmarshal(b []byte) error { @@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 { return 0 } +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil && m.SampleCountFloat != nil { + return *m.SampleCountFloat + } + return 0 +} + func (m *Histogram) GetSampleSum() float64 { if m != nil && m.SampleSum != nil { return *m.SampleSum @@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket { return nil } +func (m *Histogram) GetSchema() int32 { + if m != nil && m.Schema != nil { + return *m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil && m.ZeroThreshold != nil { + return *m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil && m.ZeroCount != nil { + return *m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil && m.ZeroCountFloat != nil { + return *m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []*BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []*BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. 
type Bucket struct { CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -412,7 +526,7 @@ func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} + return fileDescriptor_d1e5ddb18987a258, []int{7} } func (m *Bucket) XXX_Unmarshal(b []byte) error { @@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 { return 0 } +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil && m.CumulativeCountFloat != nil { + return *m.CumulativeCountFloat + } + return 0 +} + func (m *Bucket) GetUpperBound() float64 { if m != nil && m.UpperBound != nil { return *m.UpperBound @@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar { return nil } +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} + +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSpan.Unmarshal(m, b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return xxx_messageInfo_BucketSpan.Size(m) +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + type Exemplar struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -467,7 +641,7 @@ func (m *Exemplar) Reset() { *m = Exemplar{} } func (m *Exemplar) String() string { return proto.CompactTextString(m) } func (*Exemplar) ProtoMessage() {} func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} + return 
fileDescriptor_d1e5ddb18987a258, []int{9} } func (m *Exemplar) XXX_Unmarshal(b []byte) error { @@ -526,7 +700,7 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} + return fileDescriptor_d1e5ddb18987a258, []int{10} } func (m *Metric) XXX_Unmarshal(b []byte) error { @@ -610,7 +784,7 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} + return fileDescriptor_d1e5ddb18987a258, []int{11} } func (m *MetricFamily) XXX_Unmarshal(b []byte) error { @@ -669,55 +843,72 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 
0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, + 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, + 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, + 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, + 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, + 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, + 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, + 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, + 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, + 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, + 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, + 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 
0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, + 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, + 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, + 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, + 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, + 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, + 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, + 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, + 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, + 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, + 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, + 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, + 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, + 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, + 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, + 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, + 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, + 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, + 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, + 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, + 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, + 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, + 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, + 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, + 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, + 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, + 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, + 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, + 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, + 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, + 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, + 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, + 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, + 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, + 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, + 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, + 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 
0xee, 0x27, + 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, + 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, + 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, + 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, + 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, + 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, + 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go index fffda4a7e..0b91f20d5 100644 --- a/vendor/github.com/prometheus/common/config/config.go +++ b/vendor/github.com/prometheus/common/config/config.go @@ -18,6 +18,7 @@ package config import ( "encoding/json" + "net/http" "path/filepath" ) @@ -34,7 +35,7 @@ func (s Secret) MarshalYAML() (interface{}, error) { return nil, nil } -//UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. +// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { type plain Secret return unmarshal((*plain)(s)) @@ -48,6 +49,29 @@ func (s Secret) MarshalJSON() ([]byte, error) { return json.Marshal(secretToken) } +type Header map[string][]Secret + +func (h *Header) HTTPHeader() http.Header { + if h == nil || *h == nil { + return nil + } + + header := make(http.Header) + + for name, values := range *h { + var s []string + if values != nil { + s = make([]string, 0, len(values)) + for _, value := range values { + s = append(s, string(value)) + } + } + header[name] = s + } + + return header +} + // DirectorySetter is a config type that contains file paths that may // be relative to the file containing the config. type DirectorySetter interface { diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index b47347e4b..731632064 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -21,15 +21,17 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" "net" "net/http" "net/url" + "os" + "path/filepath" "strings" "sync" "time" "github.com/mwitkow/go-conntrack" + "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" @@ -79,12 +81,9 @@ func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("unknown TLS version: %s", s) } -func (tv *TLSVersion) MarshalYAML() (interface{}, error) { - if tv != nil || *tv == 0 { - return []byte("null"), nil - } +func (tv TLSVersion) MarshalYAML() (interface{}, error) { for s, v := range TLSVersions { - if *tv == v { + if tv == v { return s, nil } } @@ -105,16 +104,26 @@ func (tv *TLSVersion) UnmarshalJSON(data []byte) error { } // MarshalJSON implements the json.Marshaler interface for TLSVersion. 
-func (tv *TLSVersion) MarshalJSON() ([]byte, error) { - if tv != nil || *tv == 0 { - return []byte("null"), nil +func (tv TLSVersion) MarshalJSON() ([]byte, error) { + for s, v := range TLSVersions { + if tv == v { + return json.Marshal(s) + } + } + return nil, fmt.Errorf("unknown TLS version: %d", tv) +} + +// String implements the fmt.Stringer interface for TLSVersion. +func (tv *TLSVersion) String() string { + if tv == nil || *tv == 0 { + return "" } for s, v := range TLSVersions { if *tv == v { - return []byte(s), nil + return s } } - return nil, fmt.Errorf("unknown TLS version: %d", tv) + return fmt.Sprintf("%d", tv) } // BasicAuth contains basic HTTP authentication credentials. @@ -219,11 +228,26 @@ type OAuth2 struct { Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"` TokenURL string `yaml:"token_url" json:"token_url"` EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` + TLSConfig TLSConfig `yaml:"tls_config,omitempty"` + ProxyConfig `yaml:",inline"` +} - // HTTP proxy server to use to connect to the targets. - ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` - // TLSConfig is used to connect to the token URL. - TLSConfig TLSConfig `yaml:"tls_config,omitempty"` +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (o *OAuth2) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain OAuth2 + if err := unmarshal((*plain)(o)); err != nil { + return err + } + return o.ProxyConfig.Validate() +} + +// UnmarshalJSON implements the json.Unmarshaler interface for OAuth2. +func (o *OAuth2) UnmarshalJSON(data []byte) error { + type plain OAuth2 + if err := json.Unmarshal(data, (*plain)(o)); err != nil { + return err + } + return o.ProxyConfig.Validate() } // SetDirectory joins any relative file paths with dir. @@ -235,6 +259,30 @@ func (a *OAuth2) SetDirectory(dir string) { a.TLSConfig.SetDirectory(dir) } +// LoadHTTPConfig parses the YAML input s into an HTTPClientConfig. +func LoadHTTPConfig(s string) (*HTTPClientConfig, error) { + cfg := &HTTPClientConfig{} + err := yaml.UnmarshalStrict([]byte(s), cfg) + if err != nil { + return nil, err + } + return cfg, nil +} + +// LoadHTTPConfigFile parses the given YAML file into an HTTPClientConfig. +func LoadHTTPConfigFile(filename string) (*HTTPClientConfig, []byte, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, nil, err + } + cfg, err := LoadHTTPConfig(string(content)) + if err != nil { + return nil, nil, err + } + cfg.SetDirectory(filepath.Dir(filepath.Dir(filename))) + return cfg, content, nil +} + // HTTPClientConfig configures an HTTP client. type HTTPClientConfig struct { // The HTTP basic authentication credentials for the targets. @@ -249,8 +297,6 @@ type HTTPClientConfig struct { // The bearer token file for the targets. Deprecated in favour of // Authorization.CredentialsFile. BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"` - // HTTP proxy server to use to connect to the targets. - ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` - // TLSConfig to use to connect to the targets. TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. @@ -261,6 +307,8 @@ type HTTPClientConfig struct { // The omitempty flag is not set, because it would be hidden from the // marshalled configuration when set to false.
EnableHTTP2 bool `yaml:"enable_http2" json:"enable_http2"` + // Proxy configuration. + ProxyConfig `yaml:",inline"` } // SetDirectory joins any relative file paths with dir. @@ -276,7 +324,8 @@ func (c *HTTPClientConfig) SetDirectory(dir string) { } // Validate validates the HTTPClientConfig to check only one of BearerToken, -// BasicAuth and BearerTokenFile is configured. +// BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL +// is set if ProxyConnectHeader is set. func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field. if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { @@ -334,6 +383,9 @@ func (c *HTTPClientConfig) Validate() error { return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured") } } + if err := c.ProxyConfig.Validate(); err != nil { + return err + } return nil } @@ -461,7 +513,8 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // The only timeout we care about is the configured scrape timeout. // It is applied on request. So we leave out any timings here. var rt http.RoundTripper = &http.Transport{ - Proxy: http.ProxyURL(cfg.ProxyURL.URL), + Proxy: cfg.ProxyConfig.Proxy(), + ProxyConnectHeader: cfg.ProxyConfig.GetProxyConnectHeader(), MaxIdleConns: 20000, MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 DisableKeepAlives: !opts.keepAlivesEnabled, @@ -527,7 +580,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT return newRT(tlsConfig) } - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) + return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT) } type authorizationCredentialsRoundTripper struct { @@ -571,7 +624,7 @@ func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile s func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { if len(req.Header.Get("Authorization")) == 0 { - b, err := ioutil.ReadFile(rt.authCredentialsFile) + b, err := os.ReadFile(rt.authCredentialsFile) if err != nil { return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err) } @@ -609,7 +662,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } req = cloneRequest(req) if rt.passwordFile != "" { - bs, err := ioutil.ReadFile(rt.passwordFile) + bs, err := os.ReadFile(rt.passwordFile) if err != nil { return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err) } @@ -651,7 +704,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro ) if rt.config.ClientSecretFile != "" { - data, err := ioutil.ReadFile(rt.config.ClientSecretFile) + data, err := os.ReadFile(rt.config.ClientSecretFile) if err != nil { return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err) } @@ -682,7 +735,8 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { return &http.Transport{ TLSClientConfig: tlsConfig, - Proxy: http.ProxyURL(rt.config.ProxyURL.URL), + Proxy: rt.config.ProxyConfig.Proxy(), + ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(), DisableKeepAlives: !rt.opts.keepAlivesEnabled, MaxIdleConns: 20, MaxIdleConnsPerHost: 1, // see 
https://github.com/golang/go/issues/13801 @@ -696,7 +750,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro if len(rt.config.TLSConfig.CAFile) == 0 { t, _ = tlsTransport(tlsConfig) } else { - t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, tlsTransport) + t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, rt.config.TLSConfig.CertFile, rt.config.TLSConfig.KeyFile, tlsTransport) if err != nil { return nil, err } @@ -766,6 +820,13 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { tlsConfig := &tls.Config{ InsecureSkipVerify: cfg.InsecureSkipVerify, MinVersion: uint16(cfg.MinVersion), + MaxVersion: uint16(cfg.MaxVersion), + } + + if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { + if cfg.MaxVersion < cfg.MinVersion { + return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + } } // If a CA cert is provided then let's read it in so we can validate the @@ -813,6 +874,8 @@ type TLSConfig struct { InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"` // Minimum TLS version. MinVersion TLSVersion `yaml:"min_version,omitempty" json:"min_version,omitempty"` + // Maximum TLS version. + MaxVersion TLSVersion `yaml:"max_version,omitempty" json:"max_version,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -825,18 +888,45 @@ func (c *TLSConfig) SetDirectory(dir string) { c.KeyFile = JoinDir(dir, c.KeyFile) } +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain TLSConfig + return unmarshal((*plain)(c)) +} + +// readCertAndKey reads the cert and key files from the disk. +func readCertAndKey(certFile, keyFile string) ([]byte, []byte, error) { + certData, err := os.ReadFile(certFile) + if err != nil { + return nil, nil, err + } + + keyData, err := os.ReadFile(keyFile) + if err != nil { + return nil, nil, err + } + + return certData, keyData, nil +} + // getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) +func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certData, keyData, err := readCertAndKey(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to read specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + } + + cert, err := tls.X509KeyPair(certData, keyData) if err != nil { return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) } + return &cert, nil } // readCAFile reads the CA cert file from disk. func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) + data, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) } @@ -856,23 +946,30 @@ func updateRootCA(cfg *tls.Config, b []byte) bool { // tlsRoundTripper is a RoundTripper that updates automatically its TLS // configuration whenever the content of the CA file changes. type tlsRoundTripper struct { - caFile string + caFile string + certFile string + keyFile string + // newRT returns a new RoundTripper. 
newRT func(*tls.Config) (http.RoundTripper, error) - mtx sync.RWMutex - rt http.RoundTripper - hashCAFile []byte - tlsConfig *tls.Config + mtx sync.RWMutex + rt http.RoundTripper + hashCAFile []byte + hashCertFile []byte + hashKeyFile []byte + tlsConfig *tls.Config } func NewTLSRoundTripper( cfg *tls.Config, - caFile string, + caFile, certFile, keyFile string, newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ caFile: caFile, + certFile: certFile, + keyFile: keyFile, newRT: newRT, tlsConfig: cfg, } @@ -882,7 +979,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - _, t.hashCAFile, err = t.getCAWithHash() + _, t.hashCAFile, t.hashCertFile, t.hashKeyFile, err = t.getTLSFilesWithHash() if err != nil { return nil, err } @@ -890,25 +987,36 @@ return t, nil } -func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) { - b, err := readCAFile(t.caFile) +func (t *tlsRoundTripper) getTLSFilesWithHash() ([]byte, []byte, []byte, []byte, error) { + b1, err := readCAFile(t.caFile) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err + } + h1 := sha256.Sum256(b1) + + var h2, h3 [32]byte + if t.certFile != "" { + b2, b3, err := readCertAndKey(t.certFile, t.keyFile) + if err != nil { + return nil, nil, nil, nil, err + } + h2, h3 = sha256.Sum256(b2), sha256.Sum256(b3) } - h := sha256.Sum256(b) - return b, h[:], nil + return b1, h1[:], h2[:], h3[:], nil } // RoundTrip implements the http.RoundTripper interface. func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b, h, err := t.getCAWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSFilesWithHash() if err != nil { return nil, err } t.mtx.RLock() - equal := bytes.Equal(h[:], t.hashCAFile) + equal := bytes.Equal(caHash[:], t.hashCAFile) && + bytes.Equal(certHash[:], t.hashCertFile) && + bytes.Equal(keyHash[:], t.hashKeyFile) rt := t.rt t.mtx.RUnlock() if equal { @@ -917,8 +1025,10 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { } // Create a new RoundTripper. + // The cert and key files are read separately by the client + // using GetClientCertificate. tlsConfig := t.tlsConfig.Clone() - if !updateRootCA(tlsConfig, b) { + if !updateRootCA(tlsConfig, caData) { return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) } rt, err = t.newRT(tlsConfig) @@ -929,7 +1039,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { t.mtx.Lock() t.rt = rt - t.hashCAFile = h[:] + t.hashCAFile = caHash[:] + t.hashCertFile = certHash[:] + t.hashKeyFile = keyHash[:] t.mtx.Unlock() return rt.RoundTrip(req) @@ -972,3 +1084,78 @@ func (c HTTPClientConfig) String() string { } return string(b) } + +type ProxyConfig struct { + // HTTP proxy server to use to connect to the targets. + ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` + // NoProxy contains addresses that should not use a proxy. + NoProxy string `yaml:"no_proxy,omitempty" json:"no_proxy,omitempty"` + // ProxyFromEnvironment makes use of the net/http ProxyFromEnvironment function + // to determine proxies. + ProxyFromEnvironment bool `yaml:"proxy_from_environment,omitempty" json:"proxy_from_environment,omitempty"` + // ProxyConnectHeader optionally specifies headers to send to + // proxies during CONNECT requests. Assume that at least _some_ of + // these headers are going to contain secrets and use Secret as the + // value type instead of string.
+ ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` + + proxyFunc func(*http.Request) (*url.URL, error) +} + +// Validate checks that the proxy settings in the ProxyConfig are consistent. +func (c *ProxyConfig) Validate() error { + if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) { + return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") + } + if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" { + return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured") + } + if c.ProxyFromEnvironment && c.NoProxy != "" { + return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured") + } + if c.ProxyURL.URL == nil && c.NoProxy != "" { + return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured") + } + return nil +} + +// Proxy returns the Proxy URL for a request. +func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) { + if c == nil { + return nil + } + defer func() { + fn = c.proxyFunc + }() + if c.proxyFunc != nil { + return + } + if c.ProxyFromEnvironment { + proxyFn := httpproxy.FromEnvironment().ProxyFunc() + c.proxyFunc = func(req *http.Request) (*url.URL, error) { + return proxyFn(req.URL) + } + return + } + if c.ProxyURL.URL != nil && c.ProxyURL.URL.String() != "" { + if c.NoProxy == "" { + c.proxyFunc = http.ProxyURL(c.ProxyURL.URL) + return + } + proxy := &httpproxy.Config{ + HTTPProxy: c.ProxyURL.String(), + HTTPSProxy: c.ProxyURL.String(), + NoProxy: c.NoProxy, + } + proxyFn := proxy.ProxyFunc() + c.proxyFunc = func(req *http.Request) (*url.URL, error) { + return proxyFn(req.URL) + } + } + return +} + +// GetProxyConnectHeader returns the proxy CONNECT headers. +func (c *ProxyConfig) GetProxyConnectHeader() http.Header { + return c.ProxyConnectHeader.HTTPHeader() +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d..f4fc88455 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF + } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams { + *v = *fam + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index f819e4f8b..dfac962a4 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -21,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 9d94ae9ef..21cdddcf0 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -46,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) 
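The counter handling described in the comment above can be exercised with a short sketch; the metric family literal below is invented for illustration, and only the expfmt.MetricFamilyToOpenMetrics entry point and the standard client_model types are assumed:

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A counter carrying the expected `_total` suffix.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	// The `# TYPE`/`# HELP` lines are written for "http_requests" (suffix
	// truncated), while the sample line keeps the full "http_requests_total".
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
		panic(err)
	}
}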
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b06..2946b8f1a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643e..ac2482782 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c..a21b9d15d 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8aa8..5727452c1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). 
- if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a2..9eb440413 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. 
The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. 
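For illustration, a hedged sketch of the two JSON shapes the Sample marshalling above can produce, one float sample and one native-histogram sample; the metric names, timestamps, and values are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	floatSample := model.Sample{
		Metric:    model.Metric{"__name__": "up"},
		Value:     1,
		Timestamp: 1000, // milliseconds since the epoch
	}
	histSample := model.Sample{
		Metric:    model.Metric{"__name__": "rpc_seconds"},
		Timestamp: 1000,
		Histogram: &model.SampleHistogram{
			Count: 2,
			Sum:   3.5,
			Buckets: model.HistogramBuckets{
				{Boundaries: 0, Lower: 0.5, Upper: 1, Count: 1},
				{Boundaries: 0, Lower: 1, Upper: 2, Count: 1},
			},
		},
	}

	f, _ := json.Marshal(floatSample)
	h, _ := json.Marshal(histSample)
	// Roughly: {"metric":{"__name__":"up"},"value":[1,"1"]}
	fmt.Println(string(f))
	// Roughly: {"metric":{"__name__":"rpc_seconds"},"histogram":[1,{"count":"2","sum":"3.5","buckets":[[0,"0.5","1","1"],[0,"1","2","1"]]}]}
	fmt.Println(string(h))
}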
type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 000000000..0f615a705 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 000000000..54bb038cf --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. 
+ Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 000000000..726c50ee6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 3e2a7ee50..00caa0ba4 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -31,6 +31,8 @@ var ( BuildUser string BuildDate string GoVersion = runtime.Version() + GoOS = runtime.GOOS + GoArch = runtime.GOARCH ) // NewCollector returns a collector that exports metrics about current version @@ -41,14 +43,17 @@ func NewCollector(program string) prometheus.Collector { Namespace: program, Name: "build_info", Help: fmt.Sprintf( - "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.", + "A metric with a constant '1' value labeled by version, revision, branch, goversion from which %s was built, and the goos and goarch for the build.", program, ), ConstLabels: prometheus.Labels{ "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "goversion": GoVersion, + "goos": GoOS, + "goarch": GoArch, + "tags": getTags(), }, }, func() float64 { return 1 }, @@ -62,6 +67,7 @@ var versionInfoTmpl = ` build date: {{.buildDate}} go version: {{.goVersion}} platform: {{.platform}} + tags: {{.tags}} ` // Print returns version information. @@ -69,12 +75,13 @@ func Print(program string) string { m := map[string]string{ "program": program, "version": Version, - "revision": Revision, + "revision": getRevision(), "branch": Branch, "buildUser": BuildUser, "buildDate": BuildDate, "goVersion": GoVersion, - "platform": runtime.GOOS + "/" + runtime.GOARCH, + "platform": GoOS + "/" + GoArch, + "tags": getTags(), } t := template.Must(template.New("version").Parse(versionInfoTmpl)) @@ -87,10 +94,10 @@ func Print(program string) string { // Info returns version, branch and revision information. func Info() string { - return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision) + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision()) } -// BuildContext returns goVersion, buildUser and buildDate information. +// BuildContext returns goVersion, platform, buildUser and buildDate information. 
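A brief usage sketch for the extended build metadata above; the program name and the client_golang registration are illustrative, not part of this patch:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/version"
)

func main() {
	// Exposes e.g.
	// my_exporter_build_info{branch="...",goarch="amd64",goos="linux",
	//                        goversion="go1.19",revision="...",tags="...",version="..."} 1
	prometheus.MustRegister(version.NewCollector("my_exporter"))

	// Human-readable forms typically printed by --version flags and startup logs.
	fmt.Println(version.Print("my_exporter"))
	fmt.Println(version.Info())
	fmt.Println(version.BuildContext())
}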
func BuildContext() string { - return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate) + return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, getTags()) } diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go new file mode 100644 index 000000000..8eb3a0bf1 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_default.go @@ -0,0 +1,25 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.18 +// +build !go1.18 + +package version + +func getRevision() string { + return Revision +} + +func getTags() string { + return "unknown" // Not available prior to Go 1.18 +} diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go new file mode 100644 index 000000000..bfc7d4103 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info_go118.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.18 +// +build go1.18 + +package version + +import "runtime/debug" + +var computedRevision string +var computedTags string + +func getRevision() string { + if Revision != "" { + return Revision + } + return computedRevision +} + +func getTags() string { + return computedTags +} + +func init() { + computedRevision, computedTags = computeRevision() +} + +func computeRevision() (string, string) { + var ( + rev = "unknown" + tags = "unknown" + modified bool + ) + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return rev, tags + } + for _, v := range buildInfo.Settings { + if v.Key == "vcs.revision" { + rev = v.Value + } + if v.Key == "vcs.modified" { + if v.Value == "true" { + modified = true + } + } + if v.Key == "-tags" { + tags = v.Value + } + } + if modified { + return rev + "-modified", tags + } + return rev, tags +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go index ae3ebc03b..c607a163a 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go @@ -19,6 +19,7 @@ import ( "encoding/hex" "fmt" "net/http" + "strings" "sync" "github.com/go-kit/log" @@ -113,7 +114,12 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { hashedPassword = "$2y$10$QOauhQNbBCuQDKes6eFzPeMqBSjb7Mr5DUmpZ/VcEd00UAV/LDeSi" } - cacheKey := hex.EncodeToString(append(append([]byte(user), []byte(hashedPassword)...), []byte(pass)...)) + cacheKey := strings.Join( + []string{ + hex.EncodeToString([]byte(user)), + hex.EncodeToString([]byte(hashedPassword)), + hex.EncodeToString([]byte(pass)), + }, ":") authOk, ok := u.cache.get(cacheKey) if !ok { @@ -122,7 +128,7 @@ func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(pass)) u.bcryptMtx.Unlock() - authOk = err == nil + authOk = validUser && err == nil u.cache.set(cacheKey, authOk) } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css new file mode 100644 index 000000000..cc939eaf8 --- /dev/null +++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css @@ -0,0 +1,13 @@ +body { + font-family: -apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,Liberation Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; + margin: 0; +} +header { + background-color: {{.HeaderColor}}; + color: #fff; + font-size: 2rem; + padding: 1rem; +} +main { + padding: 1rem; +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go new file mode 100644 index 000000000..f0747ac36 --- /dev/null +++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.go @@ -0,0 +1,82 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !genassets
+// +build !genassets
+
+//go:generate go run -tags genassets gen_assets.go
+
+package web
+
+import (
+	"bytes"
+	_ "embed"
+	"net/http"
+	"text/template"
+)
+
+// LandingConfig represents the configuration of the landing page.
+type LandingConfig struct {
+	HeaderColor string         // Used for the landing page header.
+	CSS         string         // CSS style tag for the landing page.
+	Name        string         // The name of the exporter, generally suffixed by _exporter.
+	Description string         // A short description about the exporter.
+	Links       []LandingLinks // Links displayed on the landing page.
+	Version     string         // The version displayed.
+}
+
+type LandingLinks struct {
+	Address     string // The URL the link points to.
+	Text        string // The text of the link.
+	Description string // A descriptive text for the link.
+}
+
+type LandingPageHandler struct {
+	landingPage []byte
+}
+
+var (
+	//go:embed landing_page.html
+	landingPagehtmlContent string
+	//go:embed landing_page.css
+	landingPagecssContent string
+)
+
+func NewLandingPage(c LandingConfig) (*LandingPageHandler, error) {
+	var buf bytes.Buffer
+	if c.CSS == "" {
+		if c.HeaderColor == "" {
+			// Default to Prometheus orange.
+			c.HeaderColor = "#e6522c"
+		}
+		cssTemplate := template.Must(template.New("landing css").Parse(landingPagecssContent))
+		if err := cssTemplate.Execute(&buf, c); err != nil {
+			return nil, err
+		}
+		c.CSS = buf.String()
+	}
+	t := template.Must(template.New("landing page").Parse(landingPagehtmlContent))
+
+	buf.Reset()
+	if err := t.Execute(&buf, c); err != nil {
+		return nil, err
+	}
+
+	return &LandingPageHandler{
+		landingPage: buf.Bytes(),
+	}, nil
+}
+
+func (h *LandingPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	w.Write(h.landingPage)
+}
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html
new file mode 100644
index 000000000..68c401702
--- /dev/null
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+	<meta charset="UTF-8">
+	<title>{{.Name}}</title>
+	<style>{{.CSS}}</style>
+</head>
+<body>
+	<header>
+		<h1>{{.Name}}</h1>
+	</header>
+	<main>
+		{{if .Description}}<p>{{.Description}}</p>{{end}}
+		{{if .Version}}<div>Version: {{.Version}}</div>{{end}}
+		<div>
+			<ul>
+				{{ range .Links }}
+				<li><a href="{{.Address}}">{{.Text}}</a>{{if .Description}}: {{.Description}}{{end}}</li>
+				{{ end }}
+			</ul>
+		</div>
+	</main>
+</body>
+</html>
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
index 328c5e0ef..47bbca174 100644
--- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
+++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go
@@ -16,16 +16,18 @@ package web
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"errors"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
+	"os"
 	"path/filepath"
 
+	"github.com/coreos/go-systemd/v22/activation"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	config_util "github.com/prometheus/common/config"
+	"golang.org/x/sync/errgroup"
 	"gopkg.in/yaml.v2"
 )
 
@@ -34,47 +36,53 @@ var (
 )
 
 type Config struct {
-	TLSConfig  TLSStruct                     `yaml:"tls_server_config"`
-	HTTPConfig HTTPStruct                    `yaml:"http_server_config"`
+	TLSConfig  TLSConfig                     `yaml:"tls_server_config"`
+	HTTPConfig HTTPConfig                    `yaml:"http_server_config"`
 	Users      map[string]config_util.Secret `yaml:"basic_auth_users"`
 }
 
-type TLSStruct struct {
+type TLSConfig struct {
 	TLSCertPath              string     `yaml:"cert_file"`
 	TLSKeyPath               string     `yaml:"key_file"`
 	ClientAuth               string     `yaml:"client_auth_type"`
 	ClientCAs                string     `yaml:"client_ca_file"`
-	CipherSuites             []cipher   `yaml:"cipher_suites"`
-	CurvePreferences         []curve    `yaml:"curve_preferences"`
-	MinVersion               tlsVersion `yaml:"min_version"`
-	MaxVersion               tlsVersion `yaml:"max_version"`
+	CipherSuites             []Cipher   `yaml:"cipher_suites"`
+	CurvePreferences         []Curve    `yaml:"curve_preferences"`
+	MinVersion               TLSVersion `yaml:"min_version"`
+	MaxVersion               TLSVersion `yaml:"max_version"`
 	PreferServerCipherSuites bool       `yaml:"prefer_server_cipher_suites"`
 }
 
+type FlagConfig struct {
+	WebListenAddresses *[]string
+	WebSystemdSocket   *bool
+	WebConfigFile      *string
+}
+
 // SetDirectory joins any relative file paths with dir.
-func (t *TLSStruct) SetDirectory(dir string) {
+func (t *TLSConfig) SetDirectory(dir string) {
 	t.TLSCertPath = config_util.JoinDir(dir, t.TLSCertPath)
 	t.TLSKeyPath = config_util.JoinDir(dir, t.TLSKeyPath)
 	t.ClientCAs = config_util.JoinDir(dir, t.ClientCAs)
 }
 
-type HTTPStruct struct {
+type HTTPConfig struct {
 	HTTP2  bool              `yaml:"http2"`
 	Header map[string]string `yaml:"headers,omitempty"`
 }
 
 func getConfig(configPath string) (*Config, error) {
-	content, err := ioutil.ReadFile(configPath)
+	content, err := os.ReadFile(configPath)
 	if err != nil {
 		return nil, err
 	}
 	c := &Config{
-		TLSConfig: TLSStruct{
+		TLSConfig: TLSConfig{
 			MinVersion:               tls.VersionTLS12,
 			MaxVersion:               tls.VersionTLS13,
 			PreferServerCipherSuites: true,
 		},
-		HTTPConfig: HTTPStruct{HTTP2: true},
+		HTTPConfig: HTTPConfig{HTTP2: true},
 	}
 	err = yaml.UnmarshalStrict(content, c)
 	if err == nil {
@@ -92,8 +100,8 @@ func getTLSConfig(configPath string) (*tls.Config, error) {
 	return ConfigToTLSConfig(&c.TLSConfig)
 }
 
-// ConfigToTLSConfig generates the golang tls.Config from the TLSStruct config.
+// ConfigToTLSConfig generates the golang tls.Config from the TLSConfig struct.
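A hedged usage sketch for the LandingPageHandler introduced above; the exporter name, port, and links are invented:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/exporter-toolkit/web"
)

func main() {
	landing, err := web.NewLandingPage(web.LandingConfig{
		Name:        "example_exporter",
		Description: "Exports example metrics.",
		Version:     "0.1.0",
		Links: []web.LandingLinks{
			{Address: "/metrics", Text: "Metrics"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/", landing)
	// Plain HTTP for brevity; a real exporter would go through web.ListenAndServe.
	log.Fatal(http.ListenAndServe(":9100", nil))
}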
+func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) {
 	if c.TLSCertPath == "" && c.TLSKeyPath == "" && c.ClientAuth == "" && c.ClientCAs == "" {
 		return nil, errNoTLSConfig
 	}
@@ -109,7 +117,7 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) {
 	loadCert := func() (*tls.Certificate, error) {
 		cert, err := tls.LoadX509KeyPair(c.TLSCertPath, c.TLSKeyPath)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to load X509KeyPair")
+			return nil, fmt.Errorf("failed to load X509KeyPair: %w", err)
 		}
 		return &cert, nil
 	}
@@ -147,7 +155,7 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) {
 
 	if c.ClientCAs != "" {
 		clientCAPool := x509.NewCertPool()
-		clientCAFile, err := ioutil.ReadFile(c.ClientCAs)
+		clientCAFile, err := os.ReadFile(c.ClientCAs)
 		if err != nil {
 			return nil, err
 		}
@@ -177,22 +185,54 @@ func ConfigToTLSConfig(c *TLSStruct) (*tls.Config, error) {
 	return cfg, nil
 }
 
-// ListenAndServe starts the server on the given address. Based on the file
-// tlsConfigPath, TLS or basic auth could be enabled.
-func ListenAndServe(server *http.Server, tlsConfigPath string, logger log.Logger) error {
-	listener, err := net.Listen("tcp", server.Addr)
-	if err != nil {
-		return err
+// ServeMultiple starts the server on the given listeners. The FlagConfig is
+// also passed on to Serve.
+func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error {
+	errs := new(errgroup.Group)
+	for _, l := range listeners {
+		l := l
+		errs.Go(func() error {
+			return Serve(l, server, flags, logger)
+		})
+	}
+	return errs.Wait()
+}
+
+// ListenAndServe starts the server on addresses given in WebListenAddresses in
+// the FlagConfig or instead uses systemd socket activated listeners if
+// WebSystemdSocket in the FlagConfig is true. The FlagConfig is also passed on
+// to ServeMultiple.
+func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) error {
+	if *flags.WebSystemdSocket {
+		level.Info(logger).Log("msg", "Listening on systemd activated listeners instead of port listeners.")
+		listeners, err := activation.Listeners()
+		if err != nil {
+			return err
+		}
+		if len(listeners) < 1 {
+			return errors.New("no socket activation file descriptors found")
+		}
+		return ServeMultiple(listeners, server, flags, logger)
 	}
-	defer listener.Close()
-	return Serve(listener, server, tlsConfigPath, logger)
+	listeners := make([]net.Listener, 0, len(*flags.WebListenAddresses))
+	for _, address := range *flags.WebListenAddresses {
+		listener, err := net.Listen("tcp", address)
+		if err != nil {
+			return err
+		}
+		defer listener.Close()
+		listeners = append(listeners, listener)
+	}
+	return ServeMultiple(listeners, server, flags, logger)
 }
 
-// Server starts the server on the given listener. Based on the file
-// tlsConfigPath, TLS or basic auth could be enabled.
+// Serve starts the server on the given listener. Based on the file path
+// WebConfigFile in the FlagConfig, TLS or basic auth could be enabled.
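A sketch of wiring the new FlagConfig into ListenAndServe; real exporters let kingpin supply these pointers, here they are hardcoded for brevity:

package main

import (
	"net/http"

	"github.com/go-kit/log"
	"github.com/prometheus/exporter-toolkit/web"
)

func main() {
	addrs := []string{":9100"}
	systemd := false
	configFile := "" // path to a web-config.yml to enable TLS or basic auth

	flags := &web.FlagConfig{
		WebListenAddresses: &addrs,
		WebSystemdSocket:   &systemd,
		WebConfigFile:      &configFile,
	}
	server := &http.Server{Handler: http.DefaultServeMux}
	if err := web.ListenAndServe(server, flags, log.NewNopLogger()); err != nil {
		panic(err)
	}
}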
+func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { + level.Info(logger).Log("msg", "Listening on", "address", l.Addr().String()) + tlsConfigPath := *flags.WebConfigFile if tlsConfigPath == "" { - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false) + level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) } @@ -225,10 +265,10 @@ func Serve(l net.Listener, server *http.Server, tlsConfigPath string, logger log server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) } // Valid TLS config. - level.Info(logger).Log("msg", "TLS is enabled.", "http2", c.HTTPConfig.HTTP2) + level.Info(logger).Log("msg", "TLS is enabled.", "http2", c.HTTPConfig.HTTP2, "address", l.Addr().String()) case errNoTLSConfig: // No TLS config, back to plain HTTP. - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false) + level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) default: // Invalid TLS config. @@ -269,9 +309,9 @@ func Validate(tlsConfigPath string) error { return err } -type cipher uint16 +type Cipher uint16 -func (c *cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string err := unmarshal((*string)(&s)) if err != nil { @@ -279,27 +319,27 @@ func (c *cipher) UnmarshalYAML(unmarshal func(interface{}) error) error { } for _, cs := range tls.CipherSuites() { if cs.Name == s { - *c = (cipher)(cs.ID) + *c = (Cipher)(cs.ID) return nil } } return errors.New("unknown cipher: " + s) } -func (c cipher) MarshalYAML() (interface{}, error) { +func (c Cipher) MarshalYAML() (interface{}, error) { return tls.CipherSuiteName((uint16)(c)), nil } -type curve tls.CurveID +type Curve tls.CurveID -var curves = map[string]curve{ - "CurveP256": (curve)(tls.CurveP256), - "CurveP384": (curve)(tls.CurveP384), - "CurveP521": (curve)(tls.CurveP521), - "X25519": (curve)(tls.X25519), +var curves = map[string]Curve{ + "CurveP256": (Curve)(tls.CurveP256), + "CurveP384": (Curve)(tls.CurveP384), + "CurveP521": (Curve)(tls.CurveP521), + "X25519": (Curve)(tls.X25519), } -func (c *curve) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Curve) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string err := unmarshal((*string)(&s)) if err != nil { @@ -312,7 +352,7 @@ func (c *curve) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("unknown curve: " + s) } -func (c *curve) MarshalYAML() (interface{}, error) { +func (c *Curve) MarshalYAML() (interface{}, error) { for s, curveid := range curves { if *c == curveid { return s, nil @@ -321,16 +361,16 @@ func (c *curve) MarshalYAML() (interface{}, error) { return fmt.Sprintf("%v", c), nil } -type tlsVersion uint16 +type TLSVersion uint16 -var tlsVersions = map[string]tlsVersion{ - "TLS13": (tlsVersion)(tls.VersionTLS13), - "TLS12": (tlsVersion)(tls.VersionTLS12), - "TLS11": (tlsVersion)(tls.VersionTLS11), - "TLS10": (tlsVersion)(tls.VersionTLS10), +var tlsVersions = map[string]TLSVersion{ + "TLS13": (TLSVersion)(tls.VersionTLS13), + "TLS12": (TLSVersion)(tls.VersionTLS12), + "TLS11": (TLSVersion)(tls.VersionTLS11), + "TLS10": (TLSVersion)(tls.VersionTLS10), } -func (tv *tlsVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string err := 
unmarshal((*string)(&s)) if err != nil { @@ -343,7 +383,7 @@ func (tv *tlsVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("unknown TLS version: " + s) } -func (tv *tlsVersion) MarshalYAML() (interface{}, error) { +func (tv *TLSVersion) MarshalYAML() (interface{}, error) { for s, v := range tlsVersions { if *tv == v { return s, nil @@ -356,6 +396,6 @@ func (tv *tlsVersion) MarshalYAML() (interface{}, error) { // tlsConfigPath, TLS or basic auth could be enabled. // // Deprecated: Use ListenAndServe instead. -func Listen(server *http.Server, tlsConfigPath string, logger log.Logger) error { - return ListenAndServe(server, tlsConfigPath, logger) +func Listen(server *http.Server, flags *FlagConfig, logger log.Logger) error { + return ListenAndServe(server, flags, logger) } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml b/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml index 7d40d9b70..984aa0dbf 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml +++ b/vendor/github.com/prometheus/exporter-toolkit/web/web-config.yml @@ -3,4 +3,3 @@ tls_server_config: cert_file: server.crt key_file: server.key - diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6c8e3e219..e358db69c 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.45.2 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. 
-	ifeq (,$(CIRCLE_JOB))
+	ifneq (,$(SKIP_GOLANGCI_LINT))
+		GOLANGCI_LINT :=
+	else ifeq (,$(CIRCLE_JOB))
 		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
 	else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
 		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index ff6b927da..06968ca2e 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
 	return cpuinfo, nil
 }
 
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	// find the first "system type" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "CPU Family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "Model Name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
 func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
 	scanner := bufio.NewScanner(bytes.NewReader(info))
 
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 000000000..d88442f0e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index ea41bf2ca..a6b2b3127 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a82600..f9d961e44 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1..0c482c18c 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index a94f86dc4..06b7b8f21 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,8 +27,9 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 // SoftnetStat contains a single row of data from /proc/net/softnet_stat. 
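Given the per-kernel-version column layout documented above, a minimal consumer sketch; it assumes only the existing procfs.NewFS and FS.NetSoftnetStat entry points:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetSoftnetStat() // one SoftnetStat per CPU row
	if err != nil {
		panic(err)
	}
	for _, s := range stats {
		// Width reports how many columns the running kernel exposed, so a
		// caller can distinguish a real zero from a column that was absent.
		fmt.Printf("cpu=%d processed=%d dropped=%d width=%d\n",
			s.Index, s.Processed, s.Dropped, s.Width)
	}
}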
type SoftnetStat struct { @@ -38,6 +39,18 @@ type SoftnetStat struct { Dropped uint32 // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. - us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index dcea9c5a6..5cc40aef5 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) 
+ ) + + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index cca03327c..ea83a75ff 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 000000000..9df79c237 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. 
+func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 48b523819..6a43bb245 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -33,139 +33,140 @@ type ProcNetstat struct { } type TcpExt struct { // nolint:revive - SyncookiesSent float64 - SyncookiesRecv float64 - SyncookiesFailed float64 - EmbryonicRsts float64 - PruneCalled float64 - RcvPruned float64 - OfoPruned float64 - OutOfWindowIcmps float64 - LockDroppedIcmps float64 - ArpFilter float64 - TW float64 - TWRecycled float64 - TWKilled float64 - PAWSActive float64 - PAWSEstab float64 - DelayedACKs float64 - DelayedACKLocked float64 - DelayedACKLost float64 - ListenOverflows float64 - ListenDrops float64 - TCPHPHits float64 - TCPPureAcks float64 - TCPHPAcks float64 - TCPRenoRecovery float64 - TCPSackRecovery float64 - TCPSACKReneging float64 - TCPSACKReorder float64 - TCPRenoReorder float64 - TCPTSReorder float64 - TCPFullUndo float64 - TCPPartialUndo float64 - TCPDSACKUndo float64 - TCPLossUndo float64 - TCPLostRetransmit float64 - TCPRenoFailures float64 - TCPSackFailures float64 - TCPLossFailures float64 - TCPFastRetrans float64 - TCPSlowStartRetrans float64 - TCPTimeouts float64 - TCPLossProbes float64 - TCPLossProbeRecovery float64 - TCPRenoRecoveryFail float64 - TCPSackRecoveryFail float64 - TCPRcvCollapsed float64 - TCPDSACKOldSent float64 - TCPDSACKOfoSent float64 - TCPDSACKRecv float64 - TCPDSACKOfoRecv float64 - TCPAbortOnData float64 - TCPAbortOnClose float64 - TCPAbortOnMemory float64 - TCPAbortOnTimeout float64 - TCPAbortOnLinger float64 - TCPAbortFailed float64 - TCPMemoryPressures float64 - TCPMemoryPressuresChrono float64 - TCPSACKDiscard float64 - TCPDSACKIgnoredOld float64 - TCPDSACKIgnoredNoUndo float64 - TCPSpuriousRTOs float64 - TCPMD5NotFound float64 - TCPMD5Unexpected float64 - TCPMD5Failure float64 - TCPSackShifted float64 - TCPSackMerged float64 - TCPSackShiftFallback float64 - TCPBacklogDrop float64 - PFMemallocDrop float64 - TCPMinTTLDrop float64 - TCPDeferAcceptDrop float64 - IPReversePathFilter float64 - TCPTimeWaitOverflow float64 - TCPReqQFullDoCookies float64 - TCPReqQFullDrop float64 
- TCPRetransFail float64 - TCPRcvCoalesce float64 - TCPOFOQueue float64 - TCPOFODrop float64 - TCPOFOMerge float64 - TCPChallengeACK float64 - TCPSYNChallenge float64 - TCPFastOpenActive float64 - TCPFastOpenActiveFail float64 - TCPFastOpenPassive float64 - TCPFastOpenPassiveFail float64 - TCPFastOpenListenOverflow float64 - TCPFastOpenCookieReqd float64 - TCPFastOpenBlackhole float64 - TCPSpuriousRtxHostQueues float64 - BusyPollRxPackets float64 - TCPAutoCorking float64 - TCPFromZeroWindowAdv float64 - TCPToZeroWindowAdv float64 - TCPWantZeroWindowAdv float64 - TCPSynRetrans float64 - TCPOrigDataSent float64 - TCPHystartTrainDetect float64 - TCPHystartTrainCwnd float64 - TCPHystartDelayDetect float64 - TCPHystartDelayCwnd float64 - TCPACKSkippedSynRecv float64 - TCPACKSkippedPAWS float64 - TCPACKSkippedSeq float64 - TCPACKSkippedFinWait2 float64 - TCPACKSkippedTimeWait float64 - TCPACKSkippedChallenge float64 - TCPWinProbe float64 - TCPKeepAlive float64 - TCPMTUPFail float64 - TCPMTUPSuccess float64 - TCPWqueueTooBig float64 + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + TCPFromZeroWindowAdv 
*float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 } type IpExt struct { // nolint:revive - InNoRoutes float64 - InTruncatedPkts float64 - InMcastPkts float64 - OutMcastPkts float64 - InBcastPkts float64 - OutBcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InCsumErrors float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 - ReasmOverlaps float64 + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 } func (p Proc) Netstat() (ProcNetstat, error) { @@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) { if err != nil { return ProcNetstat{PID: p.PID}, err } - procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) procNetstat.PID = p.PID return procNetstat, err } -// parseNetstat parses the metrics from proc//net/netstat file +// parseProcNetstat parses the metrics from proc//net/netstat file // and returns a ProcNetstat structure. 
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { var ( scanner = bufio.NewScanner(r) procNetstat = ProcNetstat{} @@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = value + procNetstat.TcpExt.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = value + procNetstat.TcpExt.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = value + procNetstat.TcpExt.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = value + procNetstat.TcpExt.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = value + procNetstat.TcpExt.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = value + procNetstat.TcpExt.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = value + procNetstat.TcpExt.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = value + procNetstat.TcpExt.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = value + procNetstat.TcpExt.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = value + procNetstat.TcpExt.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = value + procNetstat.TcpExt.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = value + procNetstat.TcpExt.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = value + procNetstat.TcpExt.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = value + procNetstat.TcpExt.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = value + procNetstat.TcpExt.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = value + procNetstat.TcpExt.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = value + procNetstat.TcpExt.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = value + procNetstat.TcpExt.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = value + procNetstat.TcpExt.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = value + procNetstat.TcpExt.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = value + procNetstat.TcpExt.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = value + procNetstat.TcpExt.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = value + procNetstat.TcpExt.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = value + procNetstat.TcpExt.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = value + procNetstat.TcpExt.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = value + procNetstat.TcpExt.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = value + procNetstat.TcpExt.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = value + procNetstat.TcpExt.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = value + procNetstat.TcpExt.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = value + procNetstat.TcpExt.TCPFullUndo = &value case "TCPPartialUndo": - 
procNetstat.TcpExt.TCPPartialUndo = value + procNetstat.TcpExt.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = value + procNetstat.TcpExt.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = value + procNetstat.TcpExt.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = value + procNetstat.TcpExt.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = value + procNetstat.TcpExt.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = value + procNetstat.TcpExt.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = value + procNetstat.TcpExt.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = value + procNetstat.TcpExt.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = value + procNetstat.TcpExt.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = value + procNetstat.TcpExt.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = value + procNetstat.TcpExt.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = value + procNetstat.TcpExt.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = value + procNetstat.TcpExt.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = value + procNetstat.TcpExt.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = value + procNetstat.TcpExt.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = value + procNetstat.TcpExt.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = value + procNetstat.TcpExt.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = value + procNetstat.TcpExt.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = value + procNetstat.TcpExt.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = value + procNetstat.TcpExt.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = value + procNetstat.TcpExt.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = value + procNetstat.TcpExt.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = value + procNetstat.TcpExt.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = value + procNetstat.TcpExt.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = value + procNetstat.TcpExt.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = value + procNetstat.TcpExt.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = value + procNetstat.TcpExt.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = value + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = value + procNetstat.TcpExt.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = value + procNetstat.TcpExt.TCPOFODrop = &value case "TCPOFOMerge": - 
procNetstat.TcpExt.TCPOFOMerge = value + procNetstat.TcpExt.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = value + procNetstat.TcpExt.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = value + procNetstat.TcpExt.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = value + procNetstat.TcpExt.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = value + procNetstat.TcpExt.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = value + procNetstat.TcpExt.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = value + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = value + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = value + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = value + procNetstat.TcpExt.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = value + procNetstat.TcpExt.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = value + procNetstat.TcpExt.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = value + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = value + procNetstat.TcpExt.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = value + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = value + procNetstat.TcpExt.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = value + procNetstat.TcpExt.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = value + procNetstat.TcpExt.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = value + procNetstat.TcpExt.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = value + procNetstat.TcpExt.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = value + procNetstat.TcpExt.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = value + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = value + procNetstat.TcpExt.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = value + procNetstat.TcpExt.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = value + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = value + procNetstat.TcpExt.TCPACKSkippedChallenge = &value case "TCPWinProbe": - 
procNetstat.TcpExt.TCPWinProbe = value + procNetstat.TcpExt.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = value + procNetstat.TcpExt.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = value + procNetstat.TcpExt.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = value + procNetstat.TcpExt.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = value + procNetstat.TcpExt.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = value + procNetstat.IpExt.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = value + procNetstat.IpExt.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = value + procNetstat.IpExt.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = value + procNetstat.IpExt.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = value + procNetstat.IpExt.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = value + procNetstat.IpExt.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = value + procNetstat.IpExt.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = value + procNetstat.IpExt.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = value + procNetstat.IpExt.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = value + procNetstat.IpExt.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = value + procNetstat.IpExt.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = value + procNetstat.IpExt.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = value + procNetstat.IpExt.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = value + procNetstat.IpExt.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = value + procNetstat.IpExt.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = value + procNetstat.IpExt.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = value + procNetstat.IpExt.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = value + procNetstat.IpExt.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index ae191896c..6c46b7188 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -37,100 +37,100 @@ type ProcSnmp struct { } type Ip struct { // nolint:revive - Forwarding float64 - DefaultTTL float64 - InReceives float64 - InHdrErrors float64 - InAddrErrors float64 - ForwDatagrams float64 - InUnknownProtos float64 - InDiscards float64 - InDelivers float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + 
FragCreates *float64 } -type Icmp struct { - InMsgs float64 - InErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InTimeExcds float64 - InParmProbs float64 - InSrcQuenchs float64 - InRedirects float64 - InEchos float64 - InEchoReps float64 - InTimestamps float64 - InTimestampReps float64 - InAddrMasks float64 - InAddrMaskReps float64 - OutMsgs float64 - OutErrors float64 - OutDestUnreachs float64 - OutTimeExcds float64 - OutParmProbs float64 - OutSrcQuenchs float64 - OutRedirects float64 - OutEchos float64 - OutEchoReps float64 - OutTimestamps float64 - OutTimestampReps float64 - OutAddrMasks float64 - OutAddrMaskReps float64 +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 } type IcmpMsg struct { - InType3 float64 - OutType3 float64 + InType3 *float64 + OutType3 *float64 } type Tcp struct { // nolint:revive - RtoAlgorithm float64 - RtoMin float64 - RtoMax float64 - MaxConn float64 - ActiveOpens float64 - PassiveOpens float64 - AttemptFails float64 - EstabResets float64 - CurrEstab float64 - InSegs float64 - OutSegs float64 - RetransSegs float64 - InErrs float64 - OutRsts float64 - InCsumErrors float64 + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 } type Udp struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } func (p Proc) Snmp() (ProcSnmp, error) { @@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = value + procSnmp.Ip.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = value + procSnmp.Ip.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = value + procSnmp.Ip.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = value + procSnmp.Ip.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = value + procSnmp.Ip.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = value + procSnmp.Ip.ForwDatagrams = &value 
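// Since each counter in these structs (Ip, Icmp, Tcp, and the TcpExt/IpExt
// structs earlier) is now a *float64, a nil field means the key was absent
// from the proc file rather than reported as zero. A consumer-side sketch,
// assuming p is a procfs.Proc:
//
//	snmp, err := p.Snmp()
//	if err != nil {
//		return err
//	}
//	if f := snmp.Ip.Forwarding; f != nil {
//		fmt.Printf("ip_forwarding %g\n", *f)
//	} // nil: this kernel did not expose the counter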
case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = value + procSnmp.Ip.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = value + procSnmp.Ip.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = value + procSnmp.Ip.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = value + procSnmp.Ip.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = value + procSnmp.Ip.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = value + procSnmp.Ip.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = value + procSnmp.Ip.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = value + procSnmp.Ip.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = value + procSnmp.Ip.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = value + procSnmp.Ip.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = value + procSnmp.Ip.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = value + procSnmp.Ip.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = value + procSnmp.Ip.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = value + procSnmp.Icmp.InMsgs = &value case "InErrors": - procSnmp.Icmp.InErrors = value + procSnmp.Icmp.InErrors = &value case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = value + procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = value + procSnmp.Icmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = value + procSnmp.Icmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = value + procSnmp.Icmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = value + procSnmp.Icmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = value + procSnmp.Icmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = value + procSnmp.Icmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = value + procSnmp.Icmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = value + procSnmp.Icmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = value + procSnmp.Icmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = value + procSnmp.Icmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = value + procSnmp.Icmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = value + procSnmp.Icmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = value + procSnmp.Icmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = value + procSnmp.Icmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = value + procSnmp.Icmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = value + procSnmp.Icmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = value + procSnmp.Icmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = value + procSnmp.Icmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = value + procSnmp.Icmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = value + procSnmp.Icmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = value + procSnmp.Icmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = value + procSnmp.Icmp.OutTimestampReps = &value case "OutAddrMasks": - 
procSnmp.Icmp.OutAddrMasks = value + procSnmp.Icmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = value + procSnmp.Icmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = value + procSnmp.IcmpMsg.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = value + procSnmp.IcmpMsg.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = value + procSnmp.Tcp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = value + procSnmp.Tcp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = value + procSnmp.Tcp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = value + procSnmp.Tcp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = value + procSnmp.Tcp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = value + procSnmp.Tcp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = value + procSnmp.Tcp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = value + procSnmp.Tcp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = value + procSnmp.Tcp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = value + procSnmp.Tcp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = value + procSnmp.Tcp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = value + procSnmp.Tcp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = value + procSnmp.Tcp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = value + procSnmp.Tcp.OutRsts = &value case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = value + procSnmp.Tcp.InCsumErrors = &value } case "Udp": switch key { case "InDatagrams": - procSnmp.Udp.InDatagrams = value + procSnmp.Udp.InDatagrams = &value case "NoPorts": - procSnmp.Udp.NoPorts = value + procSnmp.Udp.NoPorts = &value case "InErrors": - procSnmp.Udp.InErrors = value + procSnmp.Udp.InErrors = &value case "OutDatagrams": - procSnmp.Udp.OutDatagrams = value + procSnmp.Udp.OutDatagrams = &value case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = value + procSnmp.Udp.RcvbufErrors = &value case "SndbufErrors": - procSnmp.Udp.SndbufErrors = value + procSnmp.Udp.SndbufErrors = &value case "InCsumErrors": - procSnmp.Udp.InCsumErrors = value + procSnmp.Udp.InCsumErrors = &value case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = value + procSnmp.Udp.IgnoredMulti = &value } case "UdpLite": switch key { case "InDatagrams": - procSnmp.UdpLite.InDatagrams = value + procSnmp.UdpLite.InDatagrams = &value case "NoPorts": - procSnmp.UdpLite.NoPorts = value + procSnmp.UdpLite.NoPorts = &value case "InErrors": - procSnmp.UdpLite.InErrors = value + procSnmp.UdpLite.InErrors = &value case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = value + procSnmp.UdpLite.OutDatagrams = &value case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = value + procSnmp.UdpLite.RcvbufErrors = &value case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = value + procSnmp.UdpLite.SndbufErrors = &value case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = value + procSnmp.UdpLite.InCsumErrors = &value case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = value + procSnmp.UdpLite.IgnoredMulti = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index f611992d5..3059cc6a1 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -36,106 +36,106 @@ type 
ProcSnmp6 struct { } type Ip6 struct { // nolint:revive - InReceives float64 - InHdrErrors float64 - InTooBigErrors float64 - InNoRoutes float64 - InAddrErrors float64 - InUnknownProtos float64 - InTruncatedPkts float64 - InDiscards float64 - InDelivers float64 - OutForwDatagrams float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 - InMcastPkts float64 - OutMcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 } type Icmp6 struct { - InMsgs float64 - InErrors float64 - OutMsgs float64 - OutErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InPktTooBigs float64 - InTimeExcds float64 - InParmProblems float64 - InEchos float64 - InEchoReplies float64 - InGroupMembQueries float64 - InGroupMembResponses float64 - InGroupMembReductions float64 - InRouterSolicits float64 - InRouterAdvertisements float64 - InNeighborSolicits float64 - InNeighborAdvertisements float64 - InRedirects float64 - InMLDv2Reports float64 - OutDestUnreachs float64 - OutPktTooBigs float64 - OutTimeExcds float64 - OutParmProblems float64 - OutEchos float64 - OutEchoReplies float64 - OutGroupMembQueries float64 - OutGroupMembResponses float64 - OutGroupMembReductions float64 - OutRouterSolicits float64 - OutRouterAdvertisements float64 - OutNeighborSolicits float64 - OutNeighborAdvertisements float64 - OutRedirects float64 - OutMLDv2Reports float64 - InType1 float64 - InType134 float64 - InType135 float64 - InType136 float64 - InType143 float64 - OutType133 float64 - OutType135 float64 - OutType136 float64 - OutType143 float64 + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + 
OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 } type Udp6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 } func (p Proc) Snmp6() (ProcSnmp6, error) { @@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = value + procSnmp6.Ip6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = value + procSnmp6.Ip6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = value + procSnmp6.Ip6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = value + procSnmp6.Ip6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = value + procSnmp6.Ip6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = value + procSnmp6.Ip6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = value + procSnmp6.Ip6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = value + procSnmp6.Ip6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = value + procSnmp6.Ip6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = value + procSnmp6.Ip6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = value + procSnmp6.Ip6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = value + procSnmp6.Ip6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = value + procSnmp6.Ip6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = value + procSnmp6.Ip6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = value + procSnmp6.Ip6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = value + procSnmp6.Ip6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = value + procSnmp6.Ip6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = value + procSnmp6.Ip6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = value + procSnmp6.Ip6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = value + procSnmp6.Ip6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = value + procSnmp6.Ip6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = value + procSnmp6.Ip6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = value + procSnmp6.Ip6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = value + procSnmp6.Ip6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = value + procSnmp6.Ip6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = value + procSnmp6.Ip6.OutMcastOctets = &value case 
"InBcastOctets": - procSnmp6.Ip6.InBcastOctets = value + procSnmp6.Ip6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = value + procSnmp6.Ip6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = value + procSnmp6.Ip6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = value + procSnmp6.Ip6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = value + procSnmp6.Ip6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = value + procSnmp6.Ip6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = value + procSnmp6.Icmp6.InMsgs = &value case "InErrors": - procSnmp6.Icmp6.InErrors = value + procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = value + procSnmp6.Icmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = value + procSnmp6.Icmp6.OutErrors = &value case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = value + procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = value + procSnmp6.Icmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = value + procSnmp6.Icmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = value + procSnmp6.Icmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = value + procSnmp6.Icmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = value + procSnmp6.Icmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = value + procSnmp6.Icmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = value + procSnmp6.Icmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = value + procSnmp6.Icmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = value + procSnmp6.Icmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = value + procSnmp6.Icmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = value + procSnmp6.Icmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = value + procSnmp6.Icmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = value + procSnmp6.Icmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = value + procSnmp6.Icmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = value + procSnmp6.Icmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = value + procSnmp6.Icmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = value + procSnmp6.Icmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = value + procSnmp6.Icmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = value + procSnmp6.Icmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = value + procSnmp6.Icmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = value + procSnmp6.Icmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = value + procSnmp6.Icmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = value + procSnmp6.Icmp6.OutGroupMembResponses = 
&value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = value + procSnmp6.Icmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = value + procSnmp6.Icmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = value + procSnmp6.Icmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = value + procSnmp6.Icmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = value + procSnmp6.Icmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = value + procSnmp6.Icmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = value + procSnmp6.Icmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = value + procSnmp6.Icmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = value + procSnmp6.Icmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = value + procSnmp6.Icmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = value + procSnmp6.Icmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = value + procSnmp6.Icmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = value + procSnmp6.Icmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = value + procSnmp6.Icmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = value + procSnmp6.Icmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = value + procSnmp6.Icmp6.OutType143 = &value } case "Udp6": switch key { case "InDatagrams": - procSnmp6.Udp6.InDatagrams = value + procSnmp6.Udp6.InDatagrams = &value case "NoPorts": - procSnmp6.Udp6.NoPorts = value + procSnmp6.Udp6.NoPorts = &value case "InErrors": - procSnmp6.Udp6.InErrors = value + procSnmp6.Udp6.InErrors = &value case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = value + procSnmp6.Udp6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = value + procSnmp6.Udp6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = value + procSnmp6.Udp6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = value + procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = value + procSnmp6.Udp6.IgnoredMulti = &value } case "UdpLite6": switch key { case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = value + procSnmp6.UdpLite6.InDatagrams = &value case "NoPorts": - procSnmp6.UdpLite6.NoPorts = value + procSnmp6.UdpLite6.NoPorts = &value case "InErrors": - procSnmp6.UdpLite6.InErrors = value + procSnmp6.UdpLite6.InErrors = &value case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = value + procSnmp6.UdpLite6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = value + procSnmp6.UdpLite6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = value + procSnmp6.UdpLite6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = value + procSnmp6.UdpLite6.InCsumErrors = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06c556ef9..b278eb2c2 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss 
of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 594022ded..3d8c06439 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 33f97caa0..586af48af 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) { if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 000000000..f08bfc769 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
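The thread accessors added below reuse the existing Proc parsers against /proc/PID/task/TID. A minimal sketch of walking the current process's threads (the printed fields are only an example):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		stat, err := t.Stat()
		if err != nil {
			continue // the thread may have exited between listing and reading
		}
		// Processor is the newly exported "CPU last executed on" field.
		fmt.Printf("tid=%d comm=%s cpu=%d\n", t.PID, stat.Comm, stat.Processor)
	}
}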
+ +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 20ceb77e2..cdedcae99 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -26,7 +26,9 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array // and numa_zonelist_order (deprecated) which is a string. diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index a13f397f8..a29c98eed 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -80,7 +80,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return cfg, nil } - for i, v := range cfg.GlobalConfig.ExternalLabels { + b := labels.ScratchBuilder{} + cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { if s == "$" { return "$" @@ -93,10 +94,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro }) if newV != v.Value { level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) - v.Value = newV - cfg.GlobalConfig.ExternalLabels[i] = v } - } + b.Add(v.Name, newV) + }) + cfg.GlobalConfig.ExternalLabels = b.Labels() return cfg, nil } @@ -112,10 +113,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log. 
} if agentMode { - if len(cfg.RemoteWriteConfigs) == 0 { - return nil, errors.New("at least one remote_write target must be specified in agent mode") - } - if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { return nil, errors.New("field alerting is not allowed in agent mode") } @@ -219,12 +216,13 @@ var ( // Config is the top-level configuration for Prometheus's config files. type Config struct { - GlobalConfig GlobalConfig `yaml:"global"` - AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` - RuleFiles []string `yaml:"rule_files,omitempty"` - ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` - StorageConfig StorageConfig `yaml:"storage,omitempty"` - TracingConfig TracingConfig `yaml:"tracing,omitempty"` + GlobalConfig GlobalConfig `yaml:"global"` + AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` + RuleFiles []string `yaml:"rule_files,omitempty"` + ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"` + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` + StorageConfig StorageConfig `yaml:"storage,omitempty"` + TracingConfig TracingConfig `yaml:"tracing,omitempty"` RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` @@ -238,6 +236,9 @@ func (c *Config) SetDirectory(dir string) { for i, file := range c.RuleFiles { c.RuleFiles[i] = config.JoinDir(dir, file) } + for i, file := range c.ScrapeConfigFiles { + c.ScrapeConfigFiles[i] = config.JoinDir(dir, file) + } for _, c := range c.ScrapeConfigs { c.SetDirectory(dir) } @@ -257,6 +258,58 @@ func (c Config) String() string { return string(b) } +// GetScrapeConfigs returns the scrape configurations. +func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { + scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) + + jobNames := map[string]string{} + for i, scfg := range c.ScrapeConfigs { + // We do these checks for library users that would not call Validate in + // Unmarshal. + if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + return nil, err + } + + if _, ok := jobNames[scfg.JobName]; ok { + return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) + } + jobNames[scfg.JobName] = "main config file" + scfgs[i] = scfg + } + for _, pat := range c.ScrapeConfigFiles { + fs, err := filepath.Glob(pat) + if err != nil { + // The only error can be a bad pattern. + return nil, fmt.Errorf("error retrieving scrape config files for %q: %w", pat, err) + } + for _, filename := range fs { + cfg := ScrapeConfigs{} + content, err := os.ReadFile(filename) + if err != nil { + return nil, fileErr(filename, err) + } + err = yaml.UnmarshalStrict(content, &cfg) + if err != nil { + return nil, fileErr(filename, err) + } + for _, scfg := range cfg.ScrapeConfigs { + if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + return nil, fileErr(filename, err) + } + + if f, ok := jobNames[scfg.JobName]; ok { + return nil, fileErr(filename, fmt.Errorf("found multiple scrape configs with job name %q, first found in %s", scfg.JobName, f)) + } + jobNames[scfg.JobName] = fmt.Sprintf("%q", filePath(filename)) + + scfg.SetDirectory(filepath.Dir(filename)) + scfgs = append(scfgs, scfg) + } + } + } + return scfgs, nil +} + // UnmarshalYAML implements the yaml.Unmarshaler interface.
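The new scrape_config_files option and the GetScrapeConfigs accessor above can be exercised roughly as follows. A sketch only: the inline YAML, the glob pattern "scrape/*.yml", and the target address are all illustrative:

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// scrape_config_files entries are globs that GetScrapeConfigs
	// resolves and validates; "scrape/*.yml" is a hypothetical pattern.
	yml := `
global:
  scrape_interval: 30s
scrape_config_files:
  - scrape/*.yml
scrape_configs:
  - job_name: self
    static_configs:
      - targets: ["localhost:9090"]
`
	cfg, err := config.Load(yml, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	scfgs, err := cfg.GetScrapeConfigs()
	if err != nil {
		panic(err) // e.g. a duplicate job_name across files
	}
	for _, sc := range scfgs {
		fmt.Println(sc.JobName, sc.ScrapeInterval)
	}
}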
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultConfig @@ -279,26 +332,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("invalid rule file path %q", rf) } } + + for _, sf := range c.ScrapeConfigFiles { + if !patRulePath.MatchString(sf) { + return fmt.Errorf("invalid scrape config file path %q", sf) + } + } + // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if scfg == nil { - return errors.New("empty or null scrape config section") - } - // First set the correct scrape interval, then check that the timeout - // (inferred or explicit) is not greater than that. - if scfg.ScrapeInterval == 0 { - scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval - } - if scfg.ScrapeTimeout > scfg.ScrapeInterval { - return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) - } - if scfg.ScrapeTimeout == 0 { - if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { - scfg.ScrapeTimeout = scfg.ScrapeInterval - } else { - scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout - } + if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + return err } if _, ok := jobNames[scfg.JobName]; ok { @@ -361,13 +406,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } - for _, l := range gc.ExternalLabels { + if err := gc.ExternalLabels.Validate(func(l labels.Label) error { if !model.LabelName(l.Name).IsValid() { return fmt.Errorf("%q is not a valid label name", l.Name) } if !model.LabelValue(l.Value).IsValid() { return fmt.Errorf("%q is not a valid label value", l.Value) } + return nil + }); err != nil { + return err } // First set the correct scrape interval, then check that the timeout @@ -394,13 +442,17 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // isZero returns true iff the global config is the zero value. func (c *GlobalConfig) isZero() bool { - return c.ExternalLabels == nil && + return c.ExternalLabels.IsEmpty() && c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && c.QueryLogFile == "" } +type ScrapeConfigs struct { + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` +} + // ScrapeConfig configures a scraping unit for Prometheus. type ScrapeConfig struct { // The job name to which the job label is set by default. @@ -494,6 +546,28 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } +func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error { + if c == nil { + return errors.New("empty or null scrape config section") + } + // First set the correct scrape interval, then check that the timeout + // (inferred or explicit) is not greater than that. + if c.ScrapeInterval == 0 { + c.ScrapeInterval = defaultInterval + } + if c.ScrapeTimeout > c.ScrapeInterval { + return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) + } + if c.ScrapeTimeout == 0 { + if defaultTimeout > c.ScrapeInterval { + c.ScrapeTimeout = c.ScrapeInterval + } else { + c.ScrapeTimeout = defaultTimeout + } + } + return nil +} + // MarshalYAML implements the yaml.Marshaler interface. 
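The extracted ScrapeConfig.Validate above now carries the interval/timeout defaulting that UnmarshalYAML previously did inline. A small sketch of the semantics, with illustrative values: a zero interval or timeout inherits the supplied default, and the timeout is capped at the scrape interval:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
)

func main() {
	sc := &config.ScrapeConfig{JobName: "example"}

	def := model.Duration(15 * time.Second)
	defTimeout := model.Duration(10 * time.Second)
	if err := sc.Validate(def, defTimeout); err != nil {
		panic(err)
	}
	// The zero values were filled in from the defaults.
	fmt.Println(sc.ScrapeInterval, sc.ScrapeTimeout) // 15s 10s
}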
func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { return discovery.MarshalYAMLWithInlineConfigs(c) @@ -776,12 +850,13 @@ func CheckTargetAddress(address model.LabelValue) error { // RemoteWriteConfig is the configuration for writing to remote storage. type RemoteWriteConfig struct { - URL *config.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` - Name string `yaml:"name,omitempty"` - SendExemplars bool `yaml:"send_exemplars,omitempty"` + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` + Name string `yaml:"name,omitempty"` + SendExemplars bool `yaml:"send_exemplars,omitempty"` + SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -935,3 +1010,15 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro // Thus we just do its validation here. return c.HTTPClientConfig.Validate() } + +func filePath(filename string) string { + absPath, err := filepath.Abs(filename) + if err != nil { + return filename + } + return absPath +} + +func fileErr(filename string, err error) error { + return fmt.Errorf("%q: %w", filePath(filename), err) +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index 5aa7f2e5f..c45595c6d 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -39,18 +39,23 @@ import ( ) var ( + fileSDReadErrorsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }) fileSDScanDuration = prometheus.NewSummary( prometheus.SummaryOpts{ Name: "prometheus_sd_file_scan_duration_seconds", Help: "The duration of the File-SD scan in seconds.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) - fileSDReadErrorsCount = prometheus.NewCounter( + fileSDTimeStamp = NewTimestampCollector() + fileWatcherErrorsCount = prometheus.NewCounter( prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", + Name: "prometheus_sd_file_watcher_errors_total", + Help: "The number of File-SD errors caused by filesystem watch failures.", }) - fileSDTimeStamp = NewTimestampCollector() patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) @@ -62,7 +67,7 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDScanDuration, fileSDReadErrorsCount, fileSDTimeStamp) + prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. 
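The RemoteWriteConfig change above adds a send_native_histograms field that rides along the normal config load. A sketch, with a placeholder endpoint URL:

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	yml := `
remote_write:
  - url: http://localhost:9201/write
    send_native_histograms: true
`
	cfg, err := config.Load(yml, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.RemoteWriteConfigs[0].SendNativeHistograms) // true
}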
@@ -237,6 +242,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + fileWatcherErrorsCount.Inc() return } d.watcher = watcher diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index b7357fa6c..8b304a0fa 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -428,11 +428,11 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ), + Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ) + level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go new file mode 100644 index 000000000..cd73083bb --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -0,0 +1,943 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "strings" +) + +// FloatHistogram is similar to Histogram but uses float64 for all +// counts. Additionally, bucket counts are absolute and not deltas. +// +// A FloatHistogram is needed by PromQL to handle operations that might result +// in fractional counts. Since the counts in a histogram are unlikely to be too +// large to be represented precisely by a float64, a FloatHistogram can also be +// used to represent a histogram with integer counts and thus serves as a more +// generalized representation. +type FloatHistogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8. They are all for + // base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. Or + // in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. Must be zero or positive. + ZeroCount float64 + // Total number of observations. Must be zero or positive. + Count float64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. 
Each represents an absolute count and + // must be zero or positive. + PositiveBuckets, NegativeBuckets []float64 +} + +// Copy returns a deep copy of the Histogram. +func (h *FloatHistogram) Copy() *FloatHistogram { + c := *h + + if h.PositiveSpans != nil { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if h.NegativeSpans != nil { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if h.PositiveBuckets != nil { + c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + if h.NegativeBuckets != nil { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + + return &c +} + +// CopyToSchema works like Copy, but the returned deep copy has the provided +// target schema, which must be ≤ the original schema (i.e. it must have a lower +// resolution). +func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { + if targetSchema == h.Schema { + // Fast path. + return h.Copy() + } + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) + } + c := FloatHistogram{ + Schema: targetSchema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.ZeroCount, + Count: h.Count, + Sum: h.Sum, + } + + // TODO(beorn7): This is a straight-forward implementation using merging + // iterators for the original buckets and then adding one merged bucket + // after another to the newly created FloatHistogram. It's well possible + // that a more involved implementation performs much better, which we + // could do if this code path turns out to be performance-critical. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); { + b := it.At() + c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); { + b := it.At() + c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + + return &c +} + +// String returns a string representation of the Histogram. +func (h *FloatHistogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[float64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. +func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + return Bucket[float64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// Scale scales the FloatHistogram by the provided factor, i.e. it scales all +// bucket counts including the zero bucket and the count and the sum of +// observations. 
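Given the schema rule documented in the FloatHistogram struct above (for schema n, each bucket boundary is the previous one times 2^(2^-n)), the upper bounds can be reproduced directly. A worked sketch, independent of the package:

package main

import (
	"fmt"
	"math"
)

// upperBound mirrors the documented rule: for schema n, bucket index i
// (in the positive range) has upper bound 2^(i * 2^-n).
func upperBound(schema, idx int32) float64 {
	return math.Exp2(float64(idx) / math.Exp2(float64(schema)))
}

func main() {
	// Schema 2: each power of two is split into 2^2 = 4 buckets.
	for idx := int32(1); idx <= 4; idx++ {
		fmt.Printf("bucket %d: upper bound %.6f\n", idx, upperBound(2, idx))
	}
	// Prints 1.189207, 1.414214, 1.681793, 2.000000 — each boundary is
	// the previous one times 2^(2^-2) ≈ 1.189207.
}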
The bucket layout stays the same. This method changes the +// receiving histogram directly (rather than acting on a copy). It returns a +// pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Scale(factor float64) *FloatHistogram { + h.ZeroCount *= factor + h.Count *= factor + h.Sum *= factor + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] *= factor + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] *= factor + } + return h +} + +// Add adds the provided other histogram to the receiving histogram. Count, Sum, +// and buckets from the other histogram are added to the corresponding +// components of the receiving histogram. Buckets in the other histogram that do +// not exist in the receiving histogram are inserted into the latter. The +// resulting histogram might have buckets with a population of zero or directly +// adjacent spans (offset=0). To normalize those, call the Compact method. +// +// The method reconciles differences in the zero threshold and in the schema, +// but the schema of the other histogram must be ≥ the schema of the receiving +// histogram (i.e. must have an equal or higher resolution). This means that the +// schema of the receiving histogram won't change. Its zero threshold, however, +// will change if needed. The other histogram will not be modified in any case. +// +// This method returns a pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { + switch { + case other.CounterResetHint == h.CounterResetHint: + // Adding apples to apples, all good. No need to change anything. + case h.CounterResetHint == GaugeType: + // Adding something else to a gauge. That's probably OK. Outcome is a gauge. + // Nothing to do since the receiver is already marked as gauge. + case other.CounterResetHint == GaugeType: + // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. + h.CounterResetHint = GaugeType + case h.CounterResetHint == UnknownCounterReset: + // With the receiver's CounterResetHint being "unknown", this could still be legitimate + // if the caller knows what they are doing. Outcome is then again "unknown". + // No need to do anything since the receiver's CounterResetHint is already "unknown". + case other.CounterResetHint == UnknownCounterReset: + // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". + h.CounterResetHint = UnknownCounterReset + default: + // All other cases shouldn't actually happen. + // They are a direct collision of CounterReset and NotCounterReset. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. + h.CounterResetHint = UnknownCounterReset + // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place + } + + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + h.Count += other.Count + h.Sum += other.Sum + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and create missing buckets in h in batches.
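A usage sketch for Add and the Compact normalization its doc comment recommends; the numbers are made up and both histograms deliberately share schema 0:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h1 := &histogram.FloatHistogram{
		Schema:          0,
		Count:           3,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	h2 := &histogram.FloatHistogram{
		Schema:          0,
		Count:           4,
		Sum:             7,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 2}},
		PositiveBuckets: []float64{3, 1},
	}
	// Add mutates h1; buckets present only in h2 are inserted, and
	// Compact(0) merges any directly adjacent spans that result.
	sum := h1.Add(h2).Compact(0)
	fmt.Println(sum)
}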
+ var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + return h +} + +// Sub works like Add but subtracts the other histogram. +func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + h.Count -= other.Count + h.Sum -= other.Sum + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and create missing buckets in h in batches. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + b.Count *= -1 + h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + b.Count *= -1 + h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + return h +} + +// Equals returns true if the given float histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. +func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || + h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + + if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + return true +} + +// addBucket takes the "coordinates" of the last bucket that was handled and +// adds the provided bucket after it. If a corresponding bucket exists, the +// count is added. If not, the bucket is inserted. The updated slices and the +// coordinates of the inserted or added-to bucket are returned. +func addBucket( + b Bucket[float64], + spans []Span, buckets []float64, + iSpan, iBucket int, + iInSpan, index int32, +) ( + newSpans []Span, newBuckets []float64, + newISpan, newIBucket int, newIInSpan int32, +) { + if iSpan == -1 { + // First add, check if it is before all spans. + if len(spans) == 0 || spans[0].Offset > b.Index { + // Add bucket before all others. 
+ buckets = append(buckets, 0) + copy(buckets[1:], buckets) + buckets[0] = b.Count + if len(spans) > 0 && spans[0].Offset == b.Index+1 { + spans[0].Length++ + spans[0].Offset-- + return spans, buckets, 0, 0, 0 + } + spans = append(spans, Span{}) + copy(spans[1:], spans) + spans[0] = Span{Offset: b.Index, Length: 1} + if len(spans) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spans[1].Offset -= b.Index + 1 + } + return spans, buckets, 0, 0, 0 + } + if spans[0].Offset == b.Index { + // Just add to first bucket. + buckets[0] += b.Count + return spans, buckets, 0, 0, 0 + } + // We are behind the first bucket, so set everything to the + // first bucket and continue normally. + iSpan, iBucket, iInSpan = 0, 0, 0 + index = spans[0].Offset + } + deltaIndex := b.Index - index + for { + remainingInSpan := int32(spans[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + buckets[iBucket] += b.Count + return spans, buckets, iSpan, iBucket, iInSpan + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + buckets = append(buckets, 0) + copy(buckets[iBucket+1:], buckets[iBucket:]) + buckets[iBucket] = b.Count + if deltaIndex == 0 { + // Directly after previous span, extend previous span. + if iSpan < len(spans) { + spans[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spans[iSpan].Length) + spans[iSpan].Length++ + return spans, buckets, iSpan, iBucket, iInSpan + } + if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { + // Directly before next span, extend next span. + iInSpan = 0 + spans[iSpan].Offset-- + spans[iSpan].Length++ + return spans, buckets, iSpan, iBucket, iInSpan + } + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spans) { + spans[iSpan].Offset -= deltaIndex + 1 + } + spans = append(spans, Span{}) + copy(spans[iSpan+1:], spans[iSpan:]) + spans[iSpan] = Span{Length: 1, Offset: deltaIndex} + return spans, buckets, iSpan, iBucket, iInSpan + } + // Try start of next span. + deltaIndex -= spans[iSpan].Offset + iInSpan = 0 + } +} + +// Compact eliminates empty buckets at the beginning and end of each span, then +// merges spans that are consecutive or at most maxEmptyBuckets apart, and +// finally splits spans that contain more consecutive empty buckets than +// maxEmptyBuckets. (The actual implementation might do something more efficient +// but with the same result.) The compaction happens "in place" in the +// receiving histogram, but a pointer to it is returned for convenience. +// +// The ideal value for maxEmptyBuckets depends on circumstances. The motivation +// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to +// represent very few empty buckets explicitly within one span than cutting the +// one span into two to treat the empty buckets as a gap between the two spans, +// both in terms of storage requirement as well as in terms of encoding and +// decoding effort. However, the tradeoffs are subtle. For one, they are +// different in the exposition format vs. in a TSDB chunk vs. for the in-memory +// representation as Go types.
In the TSDB, as an additional aspect, the span +// layout is only stored once per chunk, while many histograms with that same +// chunk layout are then only stored with their buckets (so that even a single +// empty bucket will be stored many times). +// +// For the Go types, an additional Span takes 8 bytes. Similarly, an additional +// bucket takes 8 bytes. Therefore, with a single separating empty bucket, both +// options have the same storage requirement, but the single-span solution is +// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0 +// and only use a larger number if you know what you are doing. +func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram { + h.PositiveBuckets, h.PositiveSpans = compactBuckets( + h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false, + ) + h.NegativeBuckets, h.NegativeSpans = compactBuckets( + h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false, + ) + return h +} + +// DetectReset returns true if the receiving histogram is missing any buckets +// that have a non-zero population in the provided previous histogram. It also +// returns true if any count (in any bucket, in the zero count, or in the count +// of observations, but NOT the sum of observations) is smaller in the receiving +// histogram compared to the previous histogram. Otherwise, it returns false. +// +// This method will shortcut to true if a CounterReset is detected, and shortcut +// to false if NotCounterReset is detected. Otherwise it will do the work to detect +// a reset. +// +// Special behavior in case the Schema or the ZeroThreshold are not the same in +// both histograms: +// +// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an +// increase of resolution) can only happen together with a reset. Thus, the +// method returns true in either case. +// +// - Upon an increase of the ZeroThreshold, the buckets in the previous +// histogram that fall within the new ZeroThreshold are added to the ZeroCount +// of the previous histogram (without mutating the provided previous +// histogram). The scenario that a populated bucket of the previous histogram +// is partially within, partially outside of the new ZeroThreshold, can only +// happen together with a counter reset and therefore shortcuts to returning +// true. +// +// - Upon a decrease of the Schema, the buckets of the previous histogram are +// merged so that they match the new, lower-resolution schema (again without +// mutating the provided previous histogram). +func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { + if h.CounterResetHint == CounterReset { + return true + } + if h.CounterResetHint == NotCounterReset { + return false + } + // In all other cases of CounterResetHint (UnknownCounterReset and GaugeType), + // we go on as we would otherwise, for reasons explained below. + // + // If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes + // with a counter reset. Therefore, we have to do all the detailed work to find out if there + // is a counter reset or not. + // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still + // allows the user to apply functions to gauge histograms that are only meant for counter histograms. + // In this case, we treat the gauge histograms as counter histograms + // (and we plan to return a warning about it to the user).
+ if h.Count < previous.Count { + return true + } + if h.Schema > previous.Schema { + return true + } + if h.ZeroThreshold < previous.ZeroThreshold { + // ZeroThreshold decreased. + return true + } + previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold) + if newThreshold != h.ZeroThreshold { + // ZeroThreshold is within a populated bucket in previous + // histogram. + return true + } + if h.ZeroCount < previousZeroCount { + return true + } + currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + if detectReset(currIt, prevIt) { + return true + } + currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + return detectReset(currIt, prevIt) +} + +func detectReset(currIt, prevIt BucketIterator[float64]) bool { + if !prevIt.Next() { + return false // If no buckets in previous histogram, nothing can be reset. + } + prevBucket := prevIt.At() + if !currIt.Next() { + // No bucket in current, but at least one in previous + // histogram. Check if any of those are non-zero, in which case + // this is a reset. + for { + if prevBucket.Count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket := currIt.At() + for { + // Forward currIt until we find the bucket corresponding to prevBucket. + for currBucket.Index < prevBucket.Index { + if !currIt.Next() { + // Reached end of currIt early, therefore + // previous histogram has a bucket that the + // current one does not have. Unless all + // remaining buckets in the previous histogram + // are unpopulated, this is a reset. + for { + if prevBucket.Count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket = currIt.At() + } + if currBucket.Index > prevBucket.Index { + // Previous histogram has a bucket the current one does + // not have. If it's populated, it's a reset. + if prevBucket.Count != 0 { + return true + } + } else { + // We have reached corresponding buckets in both iterators. + // We can finally compare the counts. + if currBucket.Count < prevBucket.Count { + return true + } + } + if !prevIt.Next() { + // Reached end of prevIt without finding offending buckets. + return false + } + prevBucket = prevIt.At() + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] { + return h.floatBucketIterator(true, 0, h.Schema) +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going +// down). +func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { + return h.floatBucketIterator(false, 0, h.Schema) +} + +// PositiveReverseBucketIterator returns a BucketIterator to iterate over all +// positive buckets in descending order (starting at the highest bucket and +// going down towards the zero bucket). +func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { + return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) +} + +// NegativeReverseBucketIterator returns a BucketIterator to iterate over all +// negative buckets in ascending order (starting at the lowest bucket and going +// up towards the zero bucket).
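A sketch of DetectReset's contract, assuming two scrapes of the same series; all values are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	prev := &histogram.FloatHistogram{
		Schema:          0,
		Count:           5,
		Sum:             20,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{2, 3},
	}

	curr := prev.Copy()
	curr.Count = 6
	curr.PositiveBuckets = []float64{2, 4}
	fmt.Println(curr.DetectReset(prev)) // false: counts only grew

	dropped := prev.Copy()
	dropped.Count = 1
	dropped.PositiveBuckets = []float64{1, 0}
	fmt.Println(dropped.DetectReset(prev)) // true: counts shrank
}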
+func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { + return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) +} + +// AllBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in ascending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + negIter: h.NegativeReverseBucketIterator(), + posIter: h.PositiveBucketIterator(), + state: -1, + } +} + +// zeroCountForLargerThreshold returns what the histogram's zero count would be +// if the ZeroThreshold had the provided larger (or equal) value. If the +// provided value is less than the histogram's ZeroThreshold, the method panics. +// If the largerThreshold ends up within a populated bucket of the histogram, it +// is adjusted upwards to the lower limit of that bucket (all in terms of +// absolute values) and that bucket's count is included in the returned +// count. The adjusted threshold is returned, too. +func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) { + // Fast path. + if largerThreshold == h.ZeroThreshold { + return h.ZeroCount, largerThreshold + } + if largerThreshold < h.ZeroThreshold { + panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold)) + } +outer: + for { + count = h.ZeroCount + i := h.PositiveBucketIterator() + for i.Next() { + b := i.At() + if b.Lower >= largerThreshold { + break + } + count += b.Count // Bucket to be merged into zero bucket. + if b.Upper > largerThreshold { + // New threshold ended up within a bucket. If it's + // populated, we need to adjust largerThreshold before + // we are done here. + if b.Count != 0 { + largerThreshold = b.Upper + } + break + } + } + i = h.NegativeBucketIterator() + for i.Next() { + b := i.At() + if b.Upper <= -largerThreshold { + break + } + count += b.Count // Bucket to be merged into zero bucket. + if b.Lower < -largerThreshold { + // New threshold ended up within a bucket. If + // it's populated, we need to adjust + // largerThreshold and have to redo the whole + // thing because the treatment of the positive + // buckets is invalid now. + if b.Count != 0 { + largerThreshold = -b.Lower + continue outer + } + break + } + } + return count, largerThreshold + } +} + +// trimBucketsInZeroBucket removes all buckets that are within the zero +// bucket. It assumes that the zero threshold is at a bucket boundary and that +// the counts in the buckets to remove are already part of the zero count. +func (h *FloatHistogram) trimBucketsInZeroBucket() { + i := h.PositiveBucketIterator() + bucketsIdx := 0 + for i.Next() { + b := i.At() + if b.Lower >= h.ZeroThreshold { + break + } + h.PositiveBuckets[bucketsIdx] = 0 + bucketsIdx++ + } + i = h.NegativeBucketIterator() + bucketsIdx = 0 + for i.Next() { + b := i.At() + if b.Upper <= -h.ZeroThreshold { + break + } + h.NegativeBuckets[bucketsIdx] = 0 + bucketsIdx++ + } + // We are abusing Compact to trim the buckets set to zero + // above. Premature compacting could cause additional cost, but this + // code path is probably rarely used anyway.
+ h.Compact(0) +} + +// reconcileZeroBuckets finds a zero bucket large enough to include the zero +// buckets of both histograms (the receiving histogram and the other histogram) +// with a zero threshold that is not within a populated bucket in either +// histogram. This method modifies the receiving histogram accordingly, but +// leaves the other histogram as is. Instead, it returns the zero count the +// other histogram would have if it were modified. +func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { + otherZeroCount := other.ZeroCount + otherZeroThreshold := other.ZeroThreshold + + for otherZeroThreshold != h.ZeroThreshold { + if h.ZeroThreshold > otherZeroThreshold { + otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold) + } + if otherZeroThreshold > h.ZeroThreshold { + h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold) + h.trimBucketsInZeroBucket() + } + } + return otherZeroCount +} + +// floatBucketIterator is a low-level constructor for bucket iterators. +// +// If positive is true, the returned iterator iterates through the positive +// buckets, otherwise through the negative buckets. +// +// If absoluteStartValue is < the lowest absolute value of any upper bucket +// boundary, the iterator starts with the first bucket. Otherwise, it will skip +// all buckets with an absolute value of their upper boundary ≤ +// absoluteStartValue. +// +// targetSchema must be ≤ the schema of FloatHistogram (and of course within the +// legal values for schemas in general). The buckets are merged to match the +// targetSchema prior to iterating (without mutating FloatHistogram). +func (h *FloatHistogram) floatBucketIterator( + positive bool, absoluteStartValue float64, targetSchema int32, +) *floatBucketIterator { + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) + } + i := &floatBucketIterator{ + baseBucketIterator: baseBucketIterator[float64, float64]{ + schema: h.Schema, + positive: positive, + }, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + } + if positive { + i.spans = h.PositiveSpans + i.buckets = h.PositiveBuckets + } else { + i.spans = h.NegativeSpans + i.buckets = h.NegativeBuckets + } + return i +} + +// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. +func newReverseFloatBucketIterator( + spans []Span, buckets []float64, schema int32, positive bool, +) *reverseFloatBucketIterator { + r := &reverseFloatBucketIterator{ + baseBucketIterator: baseBucketIterator[float64, float64]{ + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + }, + } + + r.spansIdx = len(r.spans) - 1 + r.bucketsIdx = len(r.buckets) - 1 + if r.spansIdx >= 0 { + r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1 + } + r.currIdx = 0 + for _, s := range r.spans { + r.currIdx += s.Offset + int32(s.Length) + } + + return r +} + +type floatBucketIterator struct { + baseBucketIterator[float64, float64] + + targetSchema int32 // targetSchema is the schema to merge to and must be ≤ schema. + origIdx int32 // The bucket index within the original schema. + absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. +} + +func (i *floatBucketIterator) Next() bool { + if i.spansIdx >= len(i.spans) { + return false + } + + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed.
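The public iterators built on these internals can be used as follows; a sketch that walks every bucket of a histogram, with illustrative values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		ZeroThreshold:   0.001,
		ZeroCount:       1,
		Count:           4,
		Sum:             9,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	// AllBucketIterator yields negative, zero, and positive buckets in
	// ascending order of their boundaries.
	for it := h.AllBucketIterator(); it.Next(); {
		fmt.Println(it.At().String())
	}
}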
+ origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + +mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop + } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := i.targetIdx(origIdx) + if firstPass { + i.currIdx = currIdx + firstPass = false + } else if currIdx != i.currIdx { + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. + break mergeLoop + } + } + // Skip buckets before absoluteStartValue. + // TODO(beorn7): Maybe do something more efficient than this recursive call. + if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + return i.Next() + } + return true +} + +// targetIdx returns the bucket index within i.targetSchema for the given bucket +// index within i.schema. +func (i *floatBucketIterator) targetIdx(idx int32) int32 { + if i.schema == i.targetSchema { + // Fast path for the common case. The below would yield the same + // result, just with more effort. + return idx + } + return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 +} + +type reverseFloatBucketIterator struct { + baseBucketIterator[float64, float64] + idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. +} + +func (i *reverseFloatBucketIterator) Next() bool { + i.currIdx-- + if i.bucketsIdx < 0 { + return false + } + + for i.idxInSpan < 0 { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + i.spansIdx-- + i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1 + i.currIdx -= i.spans[i.spansIdx+1].Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.bucketsIdx-- + i.idxInSpan-- + return true +} + +type allFloatBucketIterator struct { + h *FloatHistogram + negIter, posIter BucketIterator[float64] + // -1 means we are iterating negative buckets. + // 0 means it is time for the zero bucket. + // 1 means we are iterating positive buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allFloatBucketIterator) Next() bool { + switch i.state { + case -1: + if i.negIter.Next() { + i.currBucket = i.negIter.At() + if i.currBucket.Upper > -i.h.ZeroThreshold { + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = 1 + if i.h.ZeroCount > 0 { + i.currBucket = Bucket[float64]{ + Lower: -i.h.ZeroThreshold, + Upper: i.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: i.h.ZeroCount, + // Index is irrelevant for the zero bucket. 
+ } + return true + } + return i.Next() + case 1: + if i.posIter.Next() { + i.currBucket = i.posIter.At() + if i.currBucket.Lower < i.h.ZeroThreshold { + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go new file mode 100644 index 000000000..e1de5ffb5 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -0,0 +1,548 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// BucketCount is a type constraint for the count in a bucket, which can be +// float64 (for type FloatHistogram) or uint64 (for type Histogram). +type BucketCount interface { + float64 | uint64 +} + +// InternalBucketCount is used internally by Histogram and FloatHistogram. The +// difference to the BucketCount above is that Histogram internally uses deltas +// between buckets rather than absolute counts (while FloatHistogram uses +// absolute counts directly). Go type parameters don't allow type +// specialization. Therefore, where special treatment of deltas between buckets +// vs. absolute counts is important, this information has to be provided as a +// separate boolean parameter "deltaBuckets" +type InternalBucketCount interface { + float64 | int64 +} + +// Bucket represents a bucket with lower and upper limit and the absolute count +// of samples in the bucket. It also specifies if each limit is inclusive or +// not. (Mathematically, inclusive limits create a closed interval, and +// non-inclusive limits an open interval.) +// +// To represent cumulative buckets, Lower is set to -Inf, and the Count is then +// cumulative (including the counts of all buckets for smaller values). +type Bucket[BC BucketCount] struct { + Lower, Upper float64 + LowerInclusive, UpperInclusive bool + Count BC + + // Index within schema. To easily compare buckets that share the same + // schema and sign (positive or negative). Irrelevant for the zero bucket. + Index int32 +} + +// String returns a string representation of a Bucket, using the usual +// mathematical notation of '['/']' for inclusive bounds and '('/')' for +// non-inclusive bounds. +func (b Bucket[BC]) String() string { + var sb strings.Builder + if b.LowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if b.UpperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +// BucketIterator iterates over the buckets of a Histogram, returning decoded +// buckets. +type BucketIterator[BC BucketCount] interface { + // Next advances the iterator by one. + Next() bool + // At returns the current bucket. 
+ At() Bucket[BC] +} + +// baseBucketIterator provides a struct that is shared by most BucketIterator +// implementations, together with an implementation of the At method. This +// iterator can be embedded in full implementations of BucketIterator to save on +// code replication. +type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { + schema int32 + spans []Span + buckets []IBC + + positive bool // Whether this is for positive buckets. + + spansIdx int // Current span within spans slice. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + bucketsIdx int // Current bucket within buckets slice. + + currCount IBC // Count in the current bucket. + currIdx int32 // The actual bucket index. +} + +func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { + bucket := Bucket[BC]{ + Count: BC(b.currCount), + Index: b.currIdx, + } + if b.positive { + bucket.Upper = getBound(b.currIdx, b.schema) + bucket.Lower = getBound(b.currIdx-1, b.schema) + } else { + bucket.Lower = -getBound(b.currIdx, b.schema) + bucket.Upper = -getBound(b.currIdx-1, b.schema) + } + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 + return bucket +} + +// compactBuckets is a generic function used by both Histogram.Compact and +// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { + // Fast path: If there are no empty buckets AND no offset in any span is + // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return + // immediately. We check that first because it's cheap and presumably + // common. + nothingToDo := true + var currentBucketAbsolute IBC + for _, bucket := range buckets { + if deltaBuckets { + currentBucketAbsolute += bucket + } else { + currentBucketAbsolute = bucket + } + if currentBucketAbsolute == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + for _, span := range spans { + if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + return buckets, spans + } + } + + var iBucket, iSpan int + var posInSpan uint32 + currentBucketAbsolute = 0 + + // Helper function. + emptyBucketsHere := func() int { + i := 0 + abs := currentBucketAbsolute + for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 { + i++ + if i+iBucket >= len(buckets) { + break + } + abs = buckets[i+iBucket] + } + return i + } + + // Merge spans with zero-offset to avoid special cases later. + if len(spans) > 1 { + for i, span := range spans[1:] { + if span.Offset == 0 { + spans[iSpan].Length += span.Length + continue + } + iSpan++ + if i+1 != iSpan { + spans[iSpan] = span + } + } + spans = spans[:iSpan+1] + iSpan = 0 + } + + // Merge spans with zero-length to avoid special cases later. + for i, span := range spans { + if span.Length == 0 { + if i+1 < len(spans) { + spans[i+1].Offset += span.Offset + } + continue + } + if i != iSpan { + spans[iSpan] = span + } + iSpan++ + } + spans = spans[:iSpan] + iSpan = 0 + + // Cut out empty buckets from start and end of spans, no matter + // what. Also cut out empty buckets from the middle of a span but only + // if there are more than maxEmptyBuckets consecutive empty buckets. 
+ for iBucket < len(buckets) { + if deltaBuckets { + currentBucketAbsolute += buckets[iBucket] + } else { + currentBucketAbsolute = buckets[iBucket] + } + if nEmpty := emptyBucketsHere(); nEmpty > 0 { + if posInSpan > 0 && + nEmpty < int(spans[iSpan].Length-posInSpan) && + nEmpty <= maxEmptyBuckets { + // The empty buckets are in the middle of a + // span, and there are few enough to not bother. + // Just fast-forward. + iBucket += nEmpty + if deltaBuckets { + currentBucketAbsolute = 0 + } + posInSpan += uint32(nEmpty) + continue + } + // In all other cases, we cut out the empty buckets. + if deltaBuckets && iBucket+nEmpty < len(buckets) { + currentBucketAbsolute = -buckets[iBucket] + buckets[iBucket+nEmpty] += buckets[iBucket] + } + buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...) + if posInSpan == 0 { + // Start of span. + if nEmpty == int(spans[iSpan].Length) { + // The whole span is empty. + offset := spans[iSpan].Offset + spans = append(spans[:iSpan], spans[iSpan+1:]...) + if len(spans) > iSpan { + spans[iSpan].Offset += offset + int32(nEmpty) + } + continue + } + spans[iSpan].Length -= uint32(nEmpty) + spans[iSpan].Offset += int32(nEmpty) + continue + } + // It's in the middle or in the end of the span. + // Split the current span. + newSpan := Span{ + Offset: int32(nEmpty), + Length: spans[iSpan].Length - posInSpan - uint32(nEmpty), + } + spans[iSpan].Length = posInSpan + // In any case, we have to split to the next span. + iSpan++ + posInSpan = 0 + if newSpan.Length == 0 { + // The span is empty, so we were already at the end of a span. + // We don't have to insert the new span, just adjust the next + // span's offset, if there is one. + if iSpan < len(spans) { + spans[iSpan].Offset += int32(nEmpty) + } + continue + } + // Insert the new span. + spans = append(spans, Span{}) + if iSpan+1 < len(spans) { + copy(spans[iSpan+1:], spans[iSpan:]) + } + spans[iSpan] = newSpan + continue + } + iBucket++ + posInSpan++ + if posInSpan >= spans[iSpan].Length { + posInSpan = 0 + iSpan++ + } + } + if maxEmptyBuckets == 0 || len(buckets) == 0 { + return buckets, spans + } + + // Finally, check if any offsets between spans are small enough to merge + // the spans. + iBucket = int(spans[0].Length) + if deltaBuckets { + currentBucketAbsolute = 0 + for _, bucket := range buckets[:iBucket] { + currentBucketAbsolute += bucket + } + } + iSpan = 1 + for iSpan < len(spans) { + if int(spans[iSpan].Offset) > maxEmptyBuckets { + l := int(spans[iSpan].Length) + if deltaBuckets { + for _, bucket := range buckets[iBucket : iBucket+l] { + currentBucketAbsolute += bucket + } + } + iBucket += l + iSpan++ + continue + } + // Merge span with previous one and insert empty buckets. + offset := int(spans[iSpan].Offset) + spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length + spans = append(spans[:iSpan], spans[iSpan+1:]...) + newBuckets := make([]IBC, len(buckets)+offset) + copy(newBuckets, buckets[:iBucket]) + copy(newBuckets[iBucket+offset:], buckets[iBucket:]) + if deltaBuckets { + newBuckets[iBucket] = -currentBucketAbsolute + newBuckets[iBucket+offset] += currentBucketAbsolute + } + iBucket += offset + buckets = newBuckets + currentBucketAbsolute = buckets[iBucket] + // Note that with many merges, it would be more efficient to + // first record all the chunks of empty buckets to insert and + // then do it in one go through all the buckets. 
+	}
+
+	return buckets, spans
+}
+
+func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool {
+	if len(b1) != len(b2) {
+		return false
+	}
+	for i, b := range b1 {
+		if b != b2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func getBound(idx, schema int32) float64 {
+	// Here is a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with an idx
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its idx
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >= 0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := int(idx) << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := idx & ((1 << schema) - 1)
+	frac := exponentialBounds[schema][fracIdx]
+	exp := (int(idx) >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
+
+// exponentialBounds is a precalculated table of bucket bounds in the interval
+// [0.5,1) in schema 0 to 8.
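+// Each entry is 2^(i / 2^schema) / 2. For example, the four bounds of
+// schema 2 are 2^0/2, 2^(1/4)/2, 2^(1/2)/2 and 2^(3/4)/2.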
+var exponentialBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 
0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 
0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go 
b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go new file mode 100644 index 000000000..6d425307c --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -0,0 +1,450 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// CounterResetHint contains the known information about a counter reset, +// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply. +type CounterResetHint byte + +const ( + UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not. + CounterReset // CounterReset means there was definitely a counter reset starting from this histogram. + NotCounterReset // NotCounterReset means there was definitely no counter reset with this histogram. + GaugeType // GaugeType means this is a gauge histogram, where counter resets do not happen. +) + +// Histogram encodes a sparse, high-resolution histogram. See the design +// document for full details: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit# +// +// The most tricky bit is how bucket indices represent real bucket boundaries. +// An example for schema 0 (by which each bucket is twice as wide as the +// previous bucket): +// +// Bucket boundaries → [-2,-1) [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1] (1,2] .... +// ↑ ↑ ↑ ↑ ↑ ↑ ↑ +// Zero bucket (width e.g. 0.001) → | | | ZB | | | +// Positive bucket indices → | | | ... -1 0 1 2 3 +// Negative bucket indices → 3 2 1 0 -1 ... +// +// Which bucket indices are actually used is determined by the spans. +type Histogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8. They are all for + // base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. Or + // in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. + ZeroCount uint64 + // Total number of observations. + Count uint64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. The first element is an absolute + // count. All following ones are deltas relative to the previous + // element. + PositiveBuckets, NegativeBuckets []int64 +} + +// A Span defines a continuous sequence of buckets. +type Span struct { + // Gap to previous span (always positive), or starting index for the 1st + // span (which can be negative). + Offset int32 + // Length of the span. + Length uint32 +} + +// Copy returns a deep copy of the Histogram. 
+func (h *Histogram) Copy() *Histogram { + c := *h + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + + return &c +} + +// String returns a string representation of the Histogram. +func (h *Histogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[uint64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. +func (h *Histogram) ZeroBucket() Bucket[uint64] { + return Bucket[uint64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { + return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going down). +func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { + return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) +} + +// CumulativeBucketIterator returns a BucketIterator to iterate over a +// cumulative view of the buckets. This method currently only supports +// Histograms without negative buckets and panics if the Histogram has negative +// buckets. It is currently only used for testing. +func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { + if len(h.NegativeBuckets) > 0 { + panic("CumulativeBucketIterator called on Histogram with negative buckets") + } + return &cumulativeBucketIterator{h: h, posSpansIdx: -1} +} + +// Equals returns true if the given histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. 
+func (h *Histogram) Equals(h2 *Histogram) bool {
+	if h2 == nil {
+		return false
+	}
+
+	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
+		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
+		return false
+	}
+
+	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
+		return false
+	}
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+
+	if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
+		return false
+	}
+	if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
+	return true
+}
+
+// spansMatch returns true if both span slices represent the same bucket layout
+// after combining zero-length spans with the next non-zero-length span.
+func spansMatch(s1, s2 []Span) bool {
+	if len(s1) == 0 && len(s2) == 0 {
+		return true
+	}
+
+	s1idx, s2idx := 0, 0
+	for {
+		if s1idx >= len(s1) {
+			return allEmptySpans(s2[s2idx:])
+		}
+		if s2idx >= len(s2) {
+			return allEmptySpans(s1[s1idx:])
+		}
+
+		currS1, currS2 := s1[s1idx], s2[s2idx]
+		s1idx++
+		s2idx++
+		if currS1.Length == 0 {
+			// This span is zero length, so we add consecutive such spans
+			// until we find a non-zero span.
+			for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
+				currS1.Offset += s1[s1idx].Offset
+			}
+			if s1idx < len(s1) {
+				currS1.Offset += s1[s1idx].Offset
+				currS1.Length = s1[s1idx].Length
+				s1idx++
+			}
+		}
+		if currS2.Length == 0 {
+			// This span is zero length, so we add consecutive such spans
+			// until we find a non-zero span.
+			for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
+				currS2.Offset += s2[s2idx].Offset
+			}
+			if s2idx < len(s2) {
+				currS2.Offset += s2[s2idx].Offset
+				currS2.Length = s2[s2idx].Length
+				s2idx++
+			}
+		}
+
+		if currS1.Length == 0 && currS2.Length == 0 {
+			// The last spans of both sets are zero length. All previous spans matched.
+			return true
+		}
+
+		if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
+			return false
+		}
+	}
+}
+
+func allEmptySpans(s []Span) bool {
+	for _, ss := range s {
+		if ss.Length > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Compact works like FloatHistogram.Compact. See there for detailed
+// explanations.
+func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
+	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
+		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
+	)
+	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
+		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
+	)
+	return h
+}
+
+// ToFloat returns a FloatHistogram representation of the Histogram. It is a
+// deep copy (e.g. spans are not shared).
+func (h *Histogram) ToFloat() *FloatHistogram {
+	var (
+		positiveSpans, negativeSpans     []Span
+		positiveBuckets, negativeBuckets []float64
+	)
+	if len(h.PositiveSpans) != 0 {
+		positiveSpans = make([]Span, len(h.PositiveSpans))
+		copy(positiveSpans, h.PositiveSpans)
+	}
+	if len(h.NegativeSpans) != 0 {
+		negativeSpans = make([]Span, len(h.NegativeSpans))
+		copy(negativeSpans, h.NegativeSpans)
+	}
+	if len(h.PositiveBuckets) != 0 {
+		positiveBuckets = make([]float64, len(h.PositiveBuckets))
+		var current float64
+		for i, b := range h.PositiveBuckets {
+			current += float64(b)
+			positiveBuckets[i] = current
+		}
+	}
+	if len(h.NegativeBuckets) != 0 {
+		negativeBuckets = make([]float64, len(h.NegativeBuckets))
+		var current float64
+		for i, b := range h.NegativeBuckets {
+			current += float64(b)
+			negativeBuckets[i] = current
+		}
+	}
+
+	return &FloatHistogram{
+		CounterResetHint: h.CounterResetHint,
+		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        float64(h.ZeroCount),
+		Count:            float64(h.Count),
+		Sum:              h.Sum,
+		PositiveSpans:    positiveSpans,
+		NegativeSpans:    negativeSpans,
+		PositiveBuckets:  positiveBuckets,
+		NegativeBuckets:  negativeBuckets,
+	}
+}
+
+type regularBucketIterator struct {
+	baseBucketIterator[uint64, int64]
+}
+
+func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator {
+	i := baseBucketIterator[uint64, int64]{
+		schema:   schema,
+		spans:    spans,
+		buckets:  buckets,
+		positive: positive,
+	}
+	return &regularBucketIterator{i}
+}
+
+func (r *regularBucketIterator) Next() bool {
+	if r.spansIdx >= len(r.spans) {
+		return false
+	}
+	span := r.spans[r.spansIdx]
+	// Seed currIdx for the first bucket.
+	if r.bucketsIdx == 0 {
+		r.currIdx = span.Offset
+	} else {
+		r.currIdx++
+	}
+	for r.idxInSpan >= span.Length {
+		// We have exhausted the current span and have to find a new
+		// one. We'll even handle pathological spans of length 0.
+		r.idxInSpan = 0
+		r.spansIdx++
+		if r.spansIdx >= len(r.spans) {
+			return false
+		}
+		span = r.spans[r.spansIdx]
+		r.currIdx += span.Offset
+	}
+
+	r.currCount += r.buckets[r.bucketsIdx]
+	r.idxInSpan++
+	r.bucketsIdx++
+	return true
+}
+
+type cumulativeBucketIterator struct {
+	h *Histogram
+
+	posSpansIdx   int    // Index in h.PositiveSpans we are in. -1 means 0 bucket.
+	posBucketsIdx int    // Index in h.PositiveBuckets.
+	idxInSpan     uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
+
+	initialized         bool
+	currIdx             int32   // The actual bucket index after decoding from spans.
+	currUpper           float64 // The upper boundary of the current bucket.
+	currCount           int64   // Current non-cumulative count for the current bucket. Does not apply to empty buckets.
+	currCumulativeCount uint64  // Current "cumulative" count for the current bucket.
+
+	// Between two spans there can be empty buckets that still need to be
+	// counted for the cumulative buckets. When we hit the end of a span,
+	// we use this to iterate through the empty buckets.
+	emptyBucketCount int32
+}
+
+func (c *cumulativeBucketIterator) Next() bool {
+	if c.posSpansIdx == -1 {
+		// Zero bucket.
+		c.posSpansIdx++
+		if c.h.ZeroCount == 0 {
+			return c.Next()
+		}
+
+		c.currUpper = c.h.ZeroThreshold
+		c.currCount = int64(c.h.ZeroCount)
+		c.currCumulativeCount = uint64(c.currCount)
+		return true
+	}
+
+	if c.posSpansIdx >= len(c.h.PositiveSpans) {
+		return false
+	}
+
+	if c.emptyBucketCount > 0 {
+		// We are traversing through empty buckets at the moment.
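+		// Each empty bucket still yields one cumulative bucket: the
+		// upper bound advances while the cumulative count stays the same.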
+ c.currUpper = getBound(c.currIdx, c.h.Schema) + c.currIdx++ + c.emptyBucketCount-- + return true + } + + span := c.h.PositiveSpans[c.posSpansIdx] + if c.posSpansIdx == 0 && !c.initialized { + // Initializing. + c.currIdx = span.Offset + // The first bucket is an absolute value and not a delta with Zero bucket. + c.currCount = 0 + c.initialized = true + } + + c.currCount += c.h.PositiveBuckets[c.posBucketsIdx] + c.currCumulativeCount += uint64(c.currCount) + c.currUpper = getBound(c.currIdx, c.h.Schema) + + c.posBucketsIdx++ + c.idxInSpan++ + c.currIdx++ + if c.idxInSpan >= span.Length { + // Move to the next span. This one is done. + c.posSpansIdx++ + c.idxInSpan = 0 + if c.posSpansIdx < len(c.h.PositiveSpans) { + c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset + } + } + + return true +} + +func (c *cumulativeBucketIterator) At() Bucket[uint64] { + return Bucket[uint64]{ + Upper: c.currUpper, + Lower: math.Inf(-1), + UpperInclusive: true, + LowerInclusive: true, + Count: c.currCumulativeCount, + Index: c.currIdx - 1, + } +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 48237bdc0..8fc401564 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -11,15 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !stringlabels + package labels import ( "bytes" "encoding/json" - "sort" "strconv" "github.com/cespare/xxhash/v2" + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" ) // Well-known label names used by Prometheus components. @@ -134,6 +137,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { } // Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. func (ls Labels) Hash() uint64 { // Use xxhash.Sum64(b) for fast path as it's faster. b := make([]byte, 0, 1024) @@ -311,6 +315,19 @@ func (ls Labels) WithoutEmpty() Labels { return ls } +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + for _, l := range ls { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return false + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return false + } + } + return true +} + // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { @@ -342,10 +359,8 @@ func EmptyLabels() Labels { // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) - for _, l := range ls { - set = append(set, l) - } - sort.Sort(set) + set = append(set, ls...) + slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name }) return set } @@ -369,7 +384,7 @@ func FromStrings(ss ...string) Labels { res = append(res, Label{Name: ss[i], Value: ss[i+1]}) } - sort.Sort(res) + slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) return res } @@ -399,6 +414,49 @@ func Compare(a, b Labels) int { return len(a) - len(b) } +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + (*ls) = append((*ls)[:0], b...) +} + +// IsEmpty returns true if ls represents an empty set of labels. 
+func (ls Labels) IsEmpty() bool { + return len(ls) == 0 +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for _, l := range ls { + f(l) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + // Builder allows modifying Labels. type Builder struct { base Labels @@ -455,7 +513,7 @@ Outer: return b } -// Set the name/value pair as a label. +// Set the name/value pair as a label. A value of "" means delete that label. func (b *Builder) Set(n, v string) *Builder { if v == "" { // Empty labels are the same as missing labels. @@ -472,6 +530,46 @@ func (b *Builder) Set(n, v string) *Builder { return b } +func (b *Builder) Get(n string) string { + for _, d := range b.del { + if d == n { + return "" + } + } + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + // Labels returns the labels from the builder, adding them to res if non-nil. // Argument res can be the same as b.base, if caller wants to overwrite that slice. // If no modifications were made, the original labels are returned. @@ -487,26 +585,55 @@ func (b *Builder) Labels(res Labels) Labels { } else { res = res[:0] } -Outer: // Justification that res can be the same slice as base: in this loop // we move forward through base, and either skip an element or assign // it to res at its current position or an earlier position. for _, l := range b.base { - for _, n := range b.del { - if l.Name == n { - continue Outer - } - } - for _, la := range b.add { - if l.Name == la.Name { - continue Outer - } + if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { + continue } res = append(res, l) } if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. res = append(res, b.add...) - sort.Sort(res) + slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) } return res } + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add Labels +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. 
+func NewScratchBuilder(n int) ScratchBuilder {
+	return ScratchBuilder{add: make([]Label, 0, n)}
+}
+
+func (b *ScratchBuilder) Reset() {
+	b.add = b.add[:0]
+}
+
+// Add a name/value pair.
+// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+func (b *ScratchBuilder) Add(name, value string) {
+	b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+	slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
+}
+
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(ls Labels) {
+	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
+}
+
+// Return the name/value pairs added so far as a Labels object.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
+	return append([]Label{}, b.add...)
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go
new file mode 100644
index 000000000..560b643db
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go
@@ -0,0 +1,825 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build stringlabels
+
+package labels
+
+import (
+	"bytes"
+	"encoding/json"
+	"reflect"
+	"strconv"
+	"unsafe"
+
+	"github.com/cespare/xxhash/v2"
+	"github.com/prometheus/common/model"
+	"golang.org/x/exp/slices"
+)
+
+// Well-known label names used by Prometheus components.
+const (
+	MetricName   = "__name__"
+	AlertName    = "alertname"
+	BucketLabel  = "le"
+	InstanceName = "instance"
+)
+
+var seps = []byte{'\xff'}
+
+// Label is a key/value pair of strings.
+type Label struct {
+	Name, Value string
+}
+
+// Labels is implemented by a single flat string holding name/value pairs.
+// Each name and value is preceded by its length in varint encoding.
+// Names are in order.
+type Labels struct {
+	data string
+}
+
+type labelSlice []Label
+
+func (ls labelSlice) Len() int           { return len(ls) }
+func (ls labelSlice) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
+func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
+
+func decodeSize(data string, index int) (int, int) {
+	var size int
+	for shift := uint(0); ; shift += 7 {
+		// Just panic if we go off the end of data, since all Labels strings are constructed internally and
+		// malformed data indicates a bug, or memory corruption.
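+		// Standard varint decoding: each byte contributes 7 bits,
+		// least-significant group first; a byte below 0x80 is the last.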
+ b := data[index] + index++ + size |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + return size, index +} + +func decodeString(data string, index int) (string, int) { + var size int + size, index = decodeSize(data, index) + return data[index : index+size], index + size +} + +func (ls Labels) String() string { + var b bytes.Buffer + + b.WriteByte('{') + for i := 0; i < len(ls.data); { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + b.WriteString(name) + b.WriteByte('=') + b.WriteString(strconv.Quote(value)) + } + b.WriteByte('}') + return b.String() +} + +// Bytes returns ls as a byte slice. +// It uses non-printing characters and so should not be used for printing. +func (ls Labels) Bytes(buf []byte) []byte { + if cap(buf) < len(ls.data) { + buf = make([]byte, len(ls.data)) + } else { + buf = buf[:len(ls.data)] + } + copy(buf, ls.data) + return buf +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. +func (ls Labels) IsZero() bool { + return len(ls.data) == 0 +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +// TODO: This is only used in printing an error message +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + b := NewBuilder(ls) + if on { + b.Keep(names...) + } else { + b.Del(MetricName) + b.Del(names...) + } + return b.Labels(EmptyLabels()) +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + return xxhash.Sum64(yoloBytes(ls.data)) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. 
+func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + buf := append([]byte{}, ls.data...) + return Labels{data: yoloString(buf)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + if lName == name { + return lValue + } + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + for i := 0; i < len(ls.data); { + var lName string + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == name { + return true + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + var lName, prevName string + for i := 0; i < len(ls.data); { + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == prevName { + return lName, true + } + prevName = lName + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for pos := 0; pos < len(ls.data); { + _, newPos := decodeString(ls.data, pos) + lValue, newPos := decodeString(ls.data, newPos) + if lValue != "" { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. 
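+		// (If there is more than one blank value, the buffer simply
+		// ends up with spare capacity; correctness doesn't depend on it.)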
+ buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeString(ls.data, pos) + lValue, newPos = decodeString(ls.data, newPos) + if lValue != "" { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{data: yoloString(buf)} + } + return ls +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + return ls.data == o.data +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string, len(ls.data)/10) + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + m[lName] = lValue + } + return m +} + +// EmptyLabels returns an empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func yoloBytes(s string) (b []byte) { + *(*string)(unsafe.Pointer(&b)) = s + (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) + return +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + size := labelsSize(ls) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(ls, buf) + return Labels{data: yoloString(buf)} +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +// TODO: replace with Less function - Compare is never needed. +// TODO: just compare the underlying strings when we don't need alphanumeric sorting. +func Compare(a, b Labels) int { + l := len(a.data) + if len(b.data) < l { + l = len(b.data) + } + + ia, ib := 0, 0 + for ia < l { + var aName, bName string + aName, ia = decodeString(a.data, ia) + bName, ib = decodeString(b.data, ib) + if aName != bName { + if aName < bName { + return -1 + } + return 1 + } + var aValue, bValue string + aValue, ia = decodeString(a.data, ia) + bValue, ib = decodeString(b.data, ib) + if aValue != bValue { + if aValue < bValue { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return len(a.data) - len(b.data) +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. 
+func (ls *Labels) CopyFrom(b Labels) { + ls.data = b.data // strings are immutable +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + i += size + size, i = decodeSize(ls.data, i) + i += size + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + ls.data = intern(ls.data) +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + release(ls.data) +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + for i := 0; i < len(base.data); { + var lName, lValue string + lName, i = decodeString(base.data, i) + lValue, i = decodeString(base.data, i) + if lValue == "" { + b.del = append(b.del, lName) + } + } +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { +Outer: + for i := 0; i < len(b.base.data); { + var lName string + lName, i = decodeString(b.base.data, i) + _, i = decodeString(b.base.data, i) + for _, n := range ns { + if lName == n { + continue Outer + } + } + b.del = append(b.del, lName) + } + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + if slices.Contains(b.del, n) { + return "" + } + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. 
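+	// (With more than 128 adds or dels, append spills the copy to the
+	// heap, which is still correct, just slower.)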
+ var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + +// Labels returns the labels from the builder, adding them to res if non-nil. +// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels(res Labels) Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.Sort(b.del) + a, d := 0, 0 + + bufSize := len(b.base.data) + labelsSize(b.add) + buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.data, pos) + _, pos = decodeString(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(buf, &b.add[a]) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) + } + // We have come to the end of the base set; add any remaining labels. + for ; a < len(b.add); a++ { + buf = appendLabelTo(buf, &b.add[a]) + } + return Labels{data: yoloString(buf)} +} + +func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int { + i := len(data) + for index := len(lbls) - 1; index >= 0; index-- { + size := marshalLabelToSizedBuffer(&lbls[index], data[:i]) + i -= size + } + return len(data) - i +} + +func marshalLabelToSizedBuffer(m *Label, data []byte) int { + i := len(data) + i -= len(m.Value) + copy(data[i:], m.Value) + i = encodeSize(data, i, len(m.Value)) + i -= len(m.Name) + copy(data[i:], m.Name) + i = encodeSize(data, i, len(m.Name)) + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<7 { + return 1 + } + if x >= 1<<56 { + return 9 + } + if x >= 1<<28 { + x >>= 28 + n = 4 + } + if x >= 1<<14 { + x >>= 14 + n += 2 + } + if x >= 1<<7 { + n++ + } + return n + 1 +} + +func encodeVarint(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a size is less than 128 +func encodeSize(data []byte, offset, v int) int { + if v < 1<<7 { + offset-- + data[offset] = uint8(v) + return offset + } + return encodeVarint(data, offset, uint64(v)) +} + +func labelsSize(lbls []Label) (n int) { + // we just encode name/value/name/value, without any extra tags or length bytes + for _, e := range lbls { + n += labelSize(&e) + } + return n +} + +func labelSize(m *Label) (n int) { + // strings are encoded as length followed by contents. 
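+	// For example, the pair {Name: "le", Value: "0.5"} costs
+	// (1+2) + (1+3) = 7 bytes in total.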
+	l := len(m.Name)
+	n += l + sizeVarint(uint64(l))
+	l = len(m.Value)
+	n += l + sizeVarint(uint64(l))
+	return n
+}
+
+func appendLabelTo(buf []byte, m *Label) []byte {
+	size := labelSize(m)
+	sizeRequired := len(buf) + size
+	if cap(buf) >= sizeRequired {
+		buf = buf[:sizeRequired]
+	} else {
+		bufSize := cap(buf)
+		// Double size of buffer each time it needs to grow, to amortise copying cost.
+		for bufSize < sizeRequired {
+			bufSize = bufSize*2 + 1
+		}
+		newBuf := make([]byte, sizeRequired, bufSize)
+		copy(newBuf, buf)
+		buf = newBuf
+	}
+	marshalLabelToSizedBuffer(m, buf)
+	return buf
+}
+
+// ScratchBuilder allows efficient construction of a Labels from scratch.
+type ScratchBuilder struct {
+	add             []Label
+	output          Labels
+	overwriteBuffer []byte
+}
+
+// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
+func NewScratchBuilder(n int) ScratchBuilder {
+	return ScratchBuilder{add: make([]Label, 0, n)}
+}
+
+func (b *ScratchBuilder) Reset() {
+	b.add = b.add[:0]
+	b.output = EmptyLabels()
+}
+
+// Add a name/value pair.
+// Note if you Add the same name twice you will get a duplicate label, which is invalid.
+func (b *ScratchBuilder) Add(name, value string) {
+	b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+	slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
+}
+
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(l Labels) {
+	b.output = l
+}
+
+// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+	if b.output.IsEmpty() {
+		size := labelsSize(b.add)
+		buf := make([]byte, size)
+		marshalLabelsToSizedBuffer(b.add, buf)
+		b.output = Labels{data: yoloString(buf)}
+	}
+	return b.output
+}
+
+// Write the newly-built Labels out to ls, reusing an internal buffer.
+// Callers must ensure that there are no other references to ls.
+func (b *ScratchBuilder) Overwrite(ls *Labels) {
+	size := labelsSize(b.add)
+	if size <= cap(b.overwriteBuffer) {
+		b.overwriteBuffer = b.overwriteBuffer[:size]
+	} else {
+		b.overwriteBuffer = make([]byte, size)
+	}
+	marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer)
+	ls.data = yoloString(b.overwriteBuffer)
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
index a683588d1..05b816882 100644
--- a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
+++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go
@@ -17,7 +17,6 @@ import (
 	"bufio"
 	"fmt"
 	"os"
-	"sort"
 	"strings"
 )
@@ -51,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 	defer f.Close()
 
 	scanner := bufio.NewScanner(f)
+	b := ScratchBuilder{}
 
 	var mets []Labels
 	hashes := map[uint64]struct{}{}
 	i := 0
 
 	for scanner.Scan() && i < n {
-		m := make(Labels, 0, 10)
+		b.Reset()
 
 		r := strings.NewReplacer("\"", "", "{", "", "}", "")
 		s := r.Replace(scanner.Text())
@@ -65,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
 		labelChunks := strings.Split(s, ",")
 		for _, labelChunk := range labelChunks {
 			split := strings.Split(labelChunk, ":")
-			m = append(m, Label{Name: split[0], Value: split[1]})
+			b.Add(split[0], split[1])
 		}
 		// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
-		sort.Sort(m)
+		b.Sort()
+		m := b.Labels()
 
 		h := m.Hash()
 		if _, ok := hashes[h]; ok {
diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
index e0d7f6ddf..5ef79b4a7 100644
--- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
+++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go
@@ -15,6 +15,7 @@ package relabel
 
 import (
 	"crypto/md5"
+	"encoding/binary"
 	"fmt"
 	"strings"
 
@@ -45,6 +46,10 @@ const (
 	Keep Action = "keep"
 	// Drop drops targets for which the input does match the regex.
 	Drop Action = "drop"
+	// KeepEqual drops targets for which the input does not match the target.
+	KeepEqual Action = "keepequal"
+	// DropEqual drops targets for which the input does match the target.
+	DropEqual Action = "dropequal"
 	// HashMod sets a label to the modulus of a hash of labels.
 	HashMod Action = "hashmod"
 	// LabelMap copies labels to other labelnames based on a regex.
@@ -66,7 +71,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 	switch act := Action(strings.ToLower(s)); act {
-	case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase:
+	case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase, KeepEqual, DropEqual:
 		*a = act
 		return nil
 	}
@@ -109,13 +114,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.Modulus == 0 && c.Action == HashMod {
 		return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
 	}
-	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" {
+	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
 		return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
 	}
-	if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) {
+	if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) {
 		return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 	}
-	if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement {
+	if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement {
 		return fmt.Errorf("'replacement' can not be set for %s action", c.Action)
 	}
 	if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
@@ -125,6 +130,15 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 	}
 
+	if c.Action == DropEqual || c.Action == KeepEqual {
+		if c.Regex != DefaultRelabelConfig.Regex ||
+			c.Modulus != DefaultRelabelConfig.Modulus ||
+			c.Separator != DefaultRelabelConfig.Separator ||
+			c.Replacement != DefaultRelabelConfig.Replacement {
+			return fmt.Errorf("%s action requires only 'source_labels' and 'target_label', and no other fields", c.Action)
+		}
+	}
+
 	if c.Action == LabelDrop || c.Action == LabelKeep {
 		if c.SourceLabels != nil ||
 			c.TargetLabel != DefaultRelabelConfig.TargetLabel ||
@@ -190,40 +204,55 @@ func (re Regexp) String() string {
 
 // Process returns a relabeled copy of the given label set. The relabel configurations
 // are applied in order of input.
-// If a label set is dropped, nil is returned.
+// If a label set is dropped, EmptyLabels and false are returned.
 // May return the input labelSet modified.
-func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels {
-	lb := labels.NewBuilder(nil)
+func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
+	lb := labels.NewBuilder(lbls)
+	if !ProcessBuilder(lb, cfgs...) {
+		return labels.EmptyLabels(), false
+	}
+	return lb.Labels(lbls), true
+}
+
+// ProcessBuilder is like Process, but the caller passes a labels.Builder
+// containing the initial set of labels, which is mutated by the rules.
+func ProcessBuilder(lb *labels.Builder, cfgs ...*Config) (keep bool) { for _, cfg := range cfgs { - lbls = relabel(lbls, cfg, lb) - if lbls == nil { - return nil + keep = relabel(cfg, lb) + if !keep { + return false } } - return lbls + return true } -func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels { +func relabel(cfg *Config, lb *labels.Builder) (keep bool) { var va [16]string values := va[:0] if len(cfg.SourceLabels) > cap(values) { values = make([]string, 0, len(cfg.SourceLabels)) } for _, ln := range cfg.SourceLabels { - values = append(values, lset.Get(string(ln))) + values = append(values, lb.Get(string(ln))) } val := strings.Join(values, cfg.Separator) - lb.Reset(lset) - switch cfg.Action { case Drop: if cfg.Regex.MatchString(val) { - return nil + return false } case Keep: if !cfg.Regex.MatchString(val) { - return nil + return false + } + case DropEqual: + if lb.Get(cfg.TargetLabel) == val { + return false + } + case KeepEqual: + if lb.Get(cfg.TargetLabel) != val { + return false } case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) @@ -247,42 +276,32 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels case Uppercase: lb.Set(cfg.TargetLabel, strings.ToUpper(val)) case HashMod: - mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus + hash := md5.Sum([]byte(val)) + // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. + mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) case LabelMap: - for _, l := range lset { + lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) lb.Set(res, l.Value) } - } + }) case LabelDrop: - for _, l := range lset { + lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) case LabelKeep: - for _, l := range lset { + lb.Range(func(l labels.Label) { if !cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) default: panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action)) } - return lb.Labels(lset) -} - -// sum64 sums the md5 hash to an uint64. -func sum64(hash [md5.Size]byte) uint64 { - var s uint64 - - for i, b := range hash { - shift := uint64((md5.Size - i - 1) * 8) - - s |= uint64(b) << shift - } - return s + return true } diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index f1d5f3925..30b3face0 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -143,22 +143,24 @@ type RuleGroup struct { // Rule describes an alerting or recording rule. type Rule struct { - Record string `yaml:"record,omitempty"` - Alert string `yaml:"alert,omitempty"` - Expr string `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record string `yaml:"record,omitempty"` + Alert string `yaml:"alert,omitempty"` + Expr string `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules. 
type RuleNode struct { - Record yaml.Node `yaml:"record,omitempty"` - Alert yaml.Node `yaml:"alert,omitempty"` - Expr yaml.Node `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record yaml.Node `yaml:"record,omitempty"` + Alert yaml.Node `yaml:"alert,omitempty"` + Expr yaml.Node `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // Validate the rule and return a list of encountered errors. @@ -208,6 +210,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { node: &r.Record, }) } + if r.KeepFiringFor != 0 { + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"), + node: &r.Record, + }) + } if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value), diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 9300e2a40..9efd942e8 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -17,16 +17,23 @@ import ( "mime" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) // Parser parses samples from a byte slice of samples in the official // Prometheus and OpenMetrics text exposition formats. type Parser interface { - // Series returns the bytes of the series, the timestamp if set, and the value - // of the current sample. + // Series returns the bytes of a series with a simple float64 as a + // value, the timestamp if set, and the value of the current sample. Series() ([]byte, *int64, float64) + // Histogram returns the bytes of a series with a sparse histogram as a + // value, the timestamp if set, and the histogram in the current sample. + // Depending on the parsed input, the function returns an (integer) Histogram + // or a FloatHistogram, with the respective other return value being nil. + Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. @@ -70,22 +77,30 @@ func New(b []byte, contentType string) (Parser, error) { } mediaType, _, err := mime.ParseMediaType(contentType) - if err == nil && mediaType == "application/openmetrics-text" { + if err != nil { + return NewPromParser(b), err + } + switch mediaType { + case "application/openmetrics-text": return NewOpenMetricsParser(b), nil + case "application/vnd.google.protobuf": + return NewProtobufParser(b), nil + default: + return NewPromParser(b), nil } - return NewPromParser(b), err } // Entry represents the type of a parsed entry. type Entry int const ( - EntryInvalid Entry = -1 - EntryType Entry = 0 - EntryHelp Entry = 1 - EntrySeries Entry = 2 - EntryComment Entry = 3 - EntryUnit Entry = 4 + EntryInvalid Entry = -1 + EntryType Entry = 0 + EntryHelp Entry = 1 + EntrySeries Entry = 2 // A series with a simple float64 as value. 
+ EntryComment Entry = 3 + EntryUnit Entry = 4 + EntryHistogram Entry = 5 // A series with a sparse histogram as a value. ) // MetricType represents metric type values. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index be1ffcd09..c17d40020 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -17,22 +17,19 @@ package textparse import ( - "bytes" "errors" "fmt" "io" "math" - "sort" "strings" "unicode/utf8" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) -var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")} - type openMetricsLexer struct { b []byte i int @@ -46,13 +43,6 @@ func (l *openMetricsLexer) buf() []byte { return l.b[l.start:l.i] } -func (l *openMetricsLexer) cur() byte { - if l.i < len(l.b) { - return l.b[l.i] - } - return byte(' ') -} - // next advances the openMetricsLexer to the next character. func (l *openMetricsLexer) next() byte { l.i++ @@ -81,6 +71,7 @@ func (l *openMetricsLexer) Error(es string) { // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { l *openMetricsLexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -112,6 +103,12 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } +// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not +// support sparse histograms yet. +func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. @@ -151,14 +148,11 @@ func (p *OpenMetricsParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -166,16 +160,16 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels. 
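A sketch of the consumer loop implied by the extended Parser interface (editorial example; the function name dumpAll is invented, and imports of fmt, io, and this textparse package are assumed, with payload and contentType supplied by the caller):

	func dumpAll(payload []byte, contentType string) error {
		p, err := textparse.New(payload, contentType)
		if err != nil {
			return err
		}
		for {
			entry, err := p.Next()
			if err == io.EOF {
				return nil
			}
			if err != nil {
				return err
			}
			switch entry {
			case textparse.EntrySeries:
				_, ts, v := p.Series() // A plain float64 sample.
				fmt.Println(ts, v)
			case textparse.EntryHistogram:
				// Exactly one of h (integer histogram) and fh (float histogram) is non-nil.
				_, ts, h, fh := p.Histogram()
				fmt.Println(ts, h, fh)
			}
		}
	}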
- sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -197,17 +191,18 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { e.Ts = p.exemplarTs } + p.builder.Reset() for i := 0; i < len(p.eOffsets); i += 4 { a := p.eOffsets[i] - p.start b := p.eOffsets[i+1] - p.start c := p.eOffsets[i+2] - p.start d := p.eOffsets[i+3] - p.start - e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], s[c:d]) } - // Sort the labels. - sort.Sort(e.Labels) + p.builder.Sort() + e.Labels = p.builder.Labels() return true } @@ -218,6 +213,14 @@ func (p *OpenMetricsParser) nextToken() token { return tok } +func (p *OpenMetricsParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) +} + // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. func (p *OpenMetricsParser) Next() (Entry, error) { @@ -243,7 +246,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tMName: p.offsets = append(p.offsets, p.l.start, p.l.i) default: - return EntryInvalid, parseError("expected metric name after "+t.String(), t2) + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } switch t2 := p.nextToken(); t2 { case tText: @@ -279,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, errors.New("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) } } switch t { @@ -292,7 +295,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { u := yoloString(p.text) if len(u) > 0 { if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { - return EntryInvalid, fmt.Errorf("unit not a suffix of metric %q", m) + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m) } } return EntryUnit, nil @@ -331,10 +334,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, errors.New("invalid timestamp") + return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { @@ -344,26 +347,20 @@ func (p *OpenMetricsParser) Next() (Entry, error) { return EntryInvalid, err } default: - return EntryInvalid, parseError("expected next entry after timestamp", t3) + return EntryInvalid, p.parseError("expected next entry after timestamp", t3) } default: - return EntryInvalid, parseError("expected timestamp or # symbol", t2) + return EntryInvalid, p.parseError("expected timestamp or # symbol", t2) } return EntrySeries, nil default: - err = fmt.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) + err = p.parseError("expected a valid start token", t) } return EntryInvalid, err } func (p *OpenMetricsParser) parseComment() error { - // Validate the name of the metric. It must have _total or _bucket as - // suffix for exemplars to be supported. 
- if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil { - return err - } - var err error // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets) @@ -390,19 +387,19 @@ func (p *OpenMetricsParser) parseComment() error { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return err + return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return errors.New("invalid exemplar timestamp") + return fmt.Errorf("invalid exemplar timestamp %f", ts) } p.exemplarTs = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: default: - return parseError("expected next entry after exemplar timestamp", t3) + return p.parseError("expected next entry after exemplar timestamp", t3) } default: - return parseError("expected timestamp or comment", t2) + return p.parseError("expected timestamp or comment", t2) } return nil } @@ -416,21 +413,21 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { return offsets, nil case tComma: if first { - return nil, parseError("expected label name or left brace", t) + return nil, p.parseError("expected label name or left brace", t) } t = p.nextToken() if t != tLName { - return nil, parseError("expected label name", t) + return nil, p.parseError("expected label name", t) } case tLName: if !first { - return nil, parseError("expected comma", t) + return nil, p.parseError("expected comma", t) } default: if first { - return nil, parseError("expected label name or left brace", t) + return nil, p.parseError("expected label name or left brace", t) } - return nil, parseError("expected comma or left brace", t) + return nil, p.parseError("expected comma or left brace", t) } first = false @@ -439,13 +436,13 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { offsets = append(offsets, p.l.start, p.l.i) if t := p.nextToken(); t != tEqual { - return nil, parseError("expected equal", t) + return nil, p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { - return nil, parseError("expected label value", t) + return nil, p.parseError("expected label value", t) } if !utf8.Valid(p.l.buf()) { - return nil, errors.New("invalid UTF-8 label value") + return nil, fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) } // The openMetricsLexer ensures the value string is quoted. Strip first @@ -456,11 +453,11 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { if t != tValue { - return 0, parseError(fmt.Sprintf("expected value after %v", after), t) + return 0, p.parseError(fmt.Sprintf("expected value after %v", after), t) } val, err := parseFloat(yoloString(p.l.buf()[1:])) if err != nil { - return 0, err + return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
if math.IsNaN(p.exemplarVal) { @@ -468,12 +465,3 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } - -func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error { - for _, suffix := range allowedSuffixes { - if bytes.HasSuffix(name, suffix) { - return nil - } - } - return fmt.Errorf("metric name %v does not support exemplars", string(name)) -} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 758ea5a66..2c981f050 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -21,13 +21,13 @@ import ( "fmt" "io" "math" - "sort" "strconv" "strings" "unicode/utf8" "unsafe" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) @@ -143,6 +143,7 @@ func (l *promlexer) Error(es string) { // Prometheus text exposition format. type PromParser struct { l *promlexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -167,6 +168,12 @@ func (p *PromParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } +// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text +// format does not support sparse histograms yet. +func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. @@ -205,14 +212,11 @@ func (p *PromParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *PromParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -220,16 +224,16 @@ func (p *PromParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels to maintain the sorted labels invariant. 
- sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -250,8 +254,12 @@ func (p *PromParser) nextToken() token { } } -func parseError(exp string, got token) error { - return fmt.Errorf("%s, got %q", exp, got) +func (p *PromParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } // Next advances the parser to the next sample. It returns false if no @@ -274,7 +282,7 @@ func (p *PromParser) Next() (Entry, error) { case tMName: p.offsets = append(p.offsets, p.l.start, p.l.i) default: - return EntryInvalid, parseError("expected metric name after "+t.String(), t2) + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } switch t2 := p.nextToken(); t2 { case tText: @@ -304,11 +312,11 @@ func (p *PromParser) Next() (Entry, error) { } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) } } if t := p.nextToken(); t != tLinebreak { - return EntryInvalid, parseError("linebreak expected after metadata", t) + return EntryInvalid, p.parseError("linebreak expected after metadata", t) } switch t { case tHelp: @@ -319,7 +327,7 @@ func (p *PromParser) Next() (Entry, error) { case tComment: p.text = p.l.buf() if t := p.nextToken(); t != tLinebreak { - return EntryInvalid, parseError("linebreak expected after comment", t) + return EntryInvalid, p.parseError("linebreak expected after comment", t) } return EntryComment, nil @@ -336,34 +344,34 @@ func (p *PromParser) Next() (Entry, error) { t2 = p.nextToken() } if t2 != tValue { - return EntryInvalid, parseError("expected value after metric", t) + return EntryInvalid, p.parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
 	if math.IsNaN(p.val) {
 		p.val = math.Float64frombits(value.NormalNaN)
 	}
 	p.hasTS = false
-	switch p.nextToken() {
+	switch t := p.nextToken(); t {
 	case tLinebreak:
 		break
 	case tTimestamp:
 		p.hasTS = true
 		if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
-			return EntryInvalid, err
+			return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
 		}
 		if t2 := p.nextToken(); t2 != tLinebreak {
-			return EntryInvalid, parseError("expected next entry after timestamp", t)
+			return EntryInvalid, p.parseError("expected next entry after timestamp", t2)
 		}
 	default:
-		return EntryInvalid, parseError("expected timestamp or new record", t)
+		return EntryInvalid, p.parseError("expected timestamp or new record", t)
 	}
 	return EntrySeries, nil
 
 default:
-	err = fmt.Errorf("%q is not a valid start token", t)
+	err = p.parseError("expected a valid start token", t)
 }
 return EntryInvalid, err
}
@@ -376,18 +384,18 @@ func (p *PromParser) parseLVals() error {
 			return nil
 		case tLName:
 		default:
-			return parseError("expected label name", t)
+			return p.parseError("expected label name", t)
 		}
 		p.offsets = append(p.offsets, p.l.start, p.l.i)
 		if t := p.nextToken(); t != tEqual {
-			return parseError("expected equal", t)
+			return p.parseError("expected equal", t)
 		}
 		if t := p.nextToken(); t != tLValue {
-			return parseError("expected label value", t)
+			return p.parseError("expected label value", t)
 		}
 		if !utf8.Valid(p.l.buf()) {
-			return fmt.Errorf("invalid UTF-8 label value")
+			return fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf())
 		}
 
 		// The promlexer ensures the value string is quoted. Strip first
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
new file mode 100644
index 000000000..eca145955
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go
@@ -0,0 +1,538 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+
+	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
+)
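For orientation (editorial note, based on the readDelimited helper further down in this file): the payload handed to NewProtobufParser is a sequence of varint-length-delimited MetricFamily messages. A sketch of producing such a payload with gogo-protobuf (the function name encodeFamilies is invented):

	func encodeFamilies(families []*dto.MetricFamily) ([]byte, error) {
		var buf bytes.Buffer
		for _, mf := range families {
			msg, err := mf.Marshal()
			if err != nil {
				return nil, err
			}
			buf.Write(proto.EncodeVarint(uint64(len(msg)))) // Length prefix.
			buf.Write(msg)                                  // Message body.
		}
		return buf.Bytes(), nil
	}

A payload built this way can be handed directly to NewProtobufParser.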
+
+// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
+// protobuf format and then presenting it as if it were parsed by a
+// Prometheus-2-style text parser. This is only done so that we can easily plug
+// in the protobuf format into Prometheus 2. For future use (with the final
+// format that will be used for native histograms), we have to revisit the
+// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing
+// could be used in a similar fashion (byte-slice pointers into the raw
+// payload), which requires some hand-coded protobuf handling. But the current
+// parsers all expect the full series name (metric name plus label pairs) as one
+// string, which is not how things are represented in the protobuf format. If
+// the re-arrangement work is actually causing problems (which has to be seen),
+// that expectation needs to be changed.
+type ProtobufParser struct {
+	in        []byte // The input to parse.
+	inPos     int    // Position within the input.
+	metricPos int    // Position within Metric slice.
+	// fieldPos is the position within a Summary or (legacy) Histogram. -2
+	// is the count. -1 is the sum. Otherwise it is the index within
+	// quantiles/buckets.
+	fieldPos   int
+	fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
+	// state is marked by the entry we are processing. EntryInvalid implies
+	// that we have to decode the next MetricFamily.
+	state Entry
+
+	builder labels.ScratchBuilder // held here to reduce allocations when building Labels
+
+	mf *dto.MetricFamily
+
+	// The following are just shenanigans to satisfy the Parser interface.
+	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
+}
+
+// NewProtobufParser returns a parser for the payload in the byte slice.
+func NewProtobufParser(b []byte) Parser {
+	return &ProtobufParser{
+		in:          b,
+		state:       EntryInvalid,
+		mf:          &dto.MetricFamily{},
+		metricBytes: &bytes.Buffer{},
+	}
+}
+
+// Series returns the bytes of a series with a simple float64 as a
+// value, the timestamp if set, and the value of the current sample.
+func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
+	var (
+		m  = p.mf.GetMetric()[p.metricPos]
+		ts = m.GetTimestampMs()
+		v  float64
+	)
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		v = m.GetCounter().GetValue()
+	case dto.MetricType_GAUGE:
+		v = m.GetGauge().GetValue()
+	case dto.MetricType_UNTYPED:
+		v = m.GetUntyped().GetValue()
+	case dto.MetricType_SUMMARY:
+		s := m.GetSummary()
+		switch p.fieldPos {
+		case -2:
+			v = float64(s.GetSampleCount())
+		case -1:
+			v = s.GetSampleSum()
+			// Need to detect summaries without quantile here.
+			if len(s.GetQuantile()) == 0 {
+				p.fieldsDone = true
+			}
+		default:
+			v = s.GetQuantile()[p.fieldPos].GetValue()
+		}
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		// This should only happen for a legacy histogram.
+		h := m.GetHistogram()
+		switch p.fieldPos {
+		case -2:
+			v = float64(h.GetSampleCount())
+		case -1:
+			v = h.GetSampleSum()
+		default:
+			bb := h.GetBucket()
+			if p.fieldPos >= len(bb) {
+				v = float64(h.GetSampleCount())
+			} else {
+				v = float64(bb[p.fieldPos].GetCumulativeCount())
+			}
+		}
+	default:
+		panic("encountered unexpected metric type, this is a bug")
+	}
+	if ts != 0 {
+		return p.metricBytes.Bytes(), &ts, v
+	}
+	// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
+	// general, but proto3 has no distinction between unset and
+	// default. Need to avoid in the final format.
+	return p.metricBytes.Bytes(), nil, v
+}
+
+// Histogram returns the bytes of a series with a native histogram as a value,
+// the timestamp if set, and the native histogram in the current sample.
+//
+// The Compact method is called before returning the Histogram (or FloatHistogram).
+// +// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0, +// the histogram is parsed and returned as a FloatHistogram and nil is returned +// as the (integer) Histogram return value. Otherwise, it is parsed and returned +// as an (integer) Histogram and nil is returned as the FloatHistogram return +// value. +func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + h = m.GetHistogram() + ) + if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { + // It is a float histogram. + fh := histogram.FloatHistogram{ + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveCount(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeCount(), + } + for i, span := range h.GetPositiveSpan() { + fh.PositiveSpans[i].Offset = span.GetOffset() + fh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + fh.NegativeSpans[i].Offset = span.GetOffset() + fh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + fh.CounterResetHint = histogram.GaugeType + } + fh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, nil, &fh + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. + return p.metricBytes.Bytes(), nil, nil, &fh + } + + sh := histogram.Histogram{ + Count: h.GetSampleCount(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCount(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveDelta(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeDelta(), + } + for i, span := range h.GetPositiveSpan() { + sh.PositiveSpans[i].Offset = span.GetOffset() + sh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + sh.NegativeSpans[i].Offset = span.GetOffset() + sh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + sh.CounterResetHint = histogram.GaugeType + } + sh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, &sh, nil + } + return p.metricBytes.Bytes(), nil, &sh, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Help() ([]byte, []byte) { + return p.metricBytes.Bytes(), []byte(p.mf.GetHelp()) +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. 
+func (p *ProtobufParser) Type() ([]byte, MetricType) {
+	n := p.metricBytes.Bytes()
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		return n, MetricTypeCounter
+	case dto.MetricType_GAUGE:
+		return n, MetricTypeGauge
+	case dto.MetricType_HISTOGRAM:
+		return n, MetricTypeHistogram
+	case dto.MetricType_GAUGE_HISTOGRAM:
+		return n, MetricTypeGaugeHistogram
+	case dto.MetricType_SUMMARY:
+		return n, MetricTypeSummary
+	}
+	return n, MetricTypeUnknown
+}
+
+// Unit always returns (nil, nil) because units aren't supported by the protobuf
+// format.
+func (p *ProtobufParser) Unit() ([]byte, []byte) {
+	return nil, nil
+}
+
+// Comment always returns nil because comments aren't supported by the protobuf
+// format.
+func (p *ProtobufParser) Comment() []byte {
+	return nil
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *ProtobufParser) Metric(l *labels.Labels) string {
+	p.builder.Reset()
+	p.builder.Add(labels.MetricName, p.getMagicName())
+
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		p.builder.Add(lp.GetName(), lp.GetValue())
+	}
+	if needed, name, value := p.getMagicLabel(); needed {
+		p.builder.Add(name, value)
+	}
+
+	// Sort labels to maintain the sorted labels invariant.
+	p.builder.Sort()
+	*l = p.builder.Labels()
+
+	return p.metricBytes.String()
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns whether an exemplar exists. In case of a native
+// histogram, the legacy bucket section is still used for exemplars. To ingest
+// all exemplars, call the Exemplar method repeatedly until it returns false.
+func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
+	m := p.mf.GetMetric()[p.metricPos]
+	var exProto *dto.Exemplar
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		exProto = m.GetCounter().GetExemplar()
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		bb := m.GetHistogram().GetBucket()
+		if p.fieldPos < 0 {
+			if p.state == EntrySeries {
+				return false // At _count or _sum.
+			}
+			p.fieldPos = 0 // Start at 1st bucket for native histograms.
+		}
+		for p.fieldPos < len(bb) {
+			exProto = bb[p.fieldPos].GetExemplar()
+			if p.state == EntrySeries {
+				break
+			}
+			p.fieldPos++
+			if exProto != nil {
+				break
+			}
+		}
+	default:
+		return false
+	}
+	if exProto == nil {
+		return false
+	}
+	ex.Value = exProto.GetValue()
+	if ts := exProto.GetTimestamp(); ts != nil {
+		ex.HasTs = true
+		ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
+	}
+	p.builder.Reset()
+	for _, lp := range exProto.GetLabel() {
+		p.builder.Add(lp.GetName(), lp.GetValue())
+	}
+	p.builder.Sort()
+	ex.Labels = p.builder.Labels()
+	return true
+}
+
+// Next advances the parser to the next "sample" (emulating the behavior of a
+// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
+// read.
+func (p *ProtobufParser) Next() (Entry, error) {
+	switch p.state {
+	case EntryInvalid:
+		p.metricPos = 0
+		p.fieldPos = -2
+		n, err := readDelimited(p.in[p.inPos:], p.mf)
+		p.inPos += n
+		if err != nil {
+			return p.state, err
+		}
+
+		// Skip empty metric families.
+		if len(p.mf.GetMetric()) == 0 {
+			return p.Next()
+		}
+
+		// We are at the beginning of a metric family. Put only the name
+		// into metricBytes and validate only name, help, and type for now.
+		name := p.mf.GetName()
+		if !model.IsValidMetricName(model.LabelValue(name)) {
+			return EntryInvalid, errors.Errorf("invalid metric name: %s", name)
+		}
+		if help := p.mf.GetHelp(); !utf8.ValidString(help) {
+			return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help)
+		}
+		switch p.mf.GetType() {
+		case dto.MetricType_COUNTER,
+			dto.MetricType_GAUGE,
+			dto.MetricType_HISTOGRAM,
+			dto.MetricType_GAUGE_HISTOGRAM,
+			dto.MetricType_SUMMARY,
+			dto.MetricType_UNTYPED:
+			// All good.
+		default:
+			return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType())
+		}
+		p.metricBytes.Reset()
+		p.metricBytes.WriteString(name)
+
+		p.state = EntryHelp
+	case EntryHelp:
+		p.state = EntryType
+	case EntryType:
+		t := p.mf.GetType()
+		if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+			isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+			p.state = EntryHistogram
+		} else {
+			p.state = EntrySeries
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	case EntryHistogram, EntrySeries:
+		t := p.mf.GetType()
+		if p.state == EntrySeries && !p.fieldsDone &&
+			(t == dto.MetricType_SUMMARY ||
+				t == dto.MetricType_HISTOGRAM ||
+				t == dto.MetricType_GAUGE_HISTOGRAM) {
+			p.fieldPos++
+		} else {
+			p.metricPos++
+			p.fieldPos = -2
+			p.fieldsDone = false
+		}
+		if p.metricPos >= len(p.mf.GetMetric()) {
+			p.state = EntryInvalid
+			return p.Next()
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	default:
+		return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state)
+	}
+	return p.state, nil
+}
+
+func (p *ProtobufParser) updateMetricBytes() error {
+	b := p.metricBytes
+	b.Reset()
+	b.WriteString(p.getMagicName())
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		b.WriteByte(model.SeparatorByte)
+		n := lp.GetName()
+		if !model.LabelName(n).IsValid() {
+			return errors.Errorf("invalid label name: %s", n)
+		}
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		v := lp.GetValue()
+		if !utf8.ValidString(v) {
+			return errors.Errorf("invalid label value: %s", v)
+		}
+		b.WriteString(v)
+	}
+	if needed, n, v := p.getMagicLabel(); needed {
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(v)
+	}
+	return nil
+}
+
+// getMagicName usually just returns p.mf.GetName() but adds a magic suffix
+// ("_count", "_sum", "_bucket") if needed according to the current parser
+// state.
+func (p *ProtobufParser) getMagicName() string {
+	t := p.mf.GetType()
+	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) {
+		return p.mf.GetName()
+	}
+	if p.fieldPos == -2 {
+		return p.mf.GetName() + "_count"
+	}
+	if p.fieldPos == -1 {
+		return p.mf.GetName() + "_sum"
+	}
+	if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
+		return p.mf.GetName() + "_bucket"
+	}
+	return p.mf.GetName()
+}
+
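To illustrate the two magic-name helpers (editorial example; family name invented): a summary family rpc_duration_seconds with quantiles 0.5 and 0.99 is emulated as the text-style series

	rpc_duration_seconds_count
	rpc_duration_seconds_sum
	rpc_duration_seconds{quantile="0.5"}
	rpc_duration_seconds{quantile="0.99"}

while a legacy histogram family gets the _bucket suffix plus an "le" label per bucket, with a final le="+Inf" bucket synthesized from the sample count.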
+// getMagicLabel returns whether a magic label ("quantile" or "le") is needed and, if
+// so, its name and value. It also sets p.fieldsDone if applicable.
+func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
+	if p.state == EntryHistogram || p.fieldPos < 0 {
+		return false, "", ""
+	}
+	switch p.mf.GetType() {
+	case dto.MetricType_SUMMARY:
+		qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
+		q := qq[p.fieldPos]
+		p.fieldsDone = p.fieldPos == len(qq)-1
+		return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
+		if p.fieldPos >= len(bb) {
+			p.fieldsDone = true
+			return true, model.BucketLabel, "+Inf"
+		}
+		b := bb[p.fieldPos]
+		p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
+		return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
+	}
+	return false, "", ""
+}
+
+var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")
+
+// readDelimited is essentially doing what the function of the same name in
+// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
+// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
+// unmarshaling, and acts on a byte slice directly without any additional
+// staging buffers.
+func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
+	if len(b) == 0 {
+		return 0, io.EOF
+	}
+	messageLength, varIntLength := proto.DecodeVarint(b)
+	if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
+		return 0, errInvalidVarint
+	}
+	totalLength := varIntLength + int(messageLength)
+	if totalLength > len(b) {
+		return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
+	}
+	mf.Reset()
+	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
+}
+
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
+// but appends ".0" if the resulting number would otherwise contain neither a
+// "." nor an "e".
+func formatOpenMetricsFloat(f float64) string {
+	// A few common cases hardcoded.
+	switch {
+	case f == 1:
+		return "1.0"
+	case f == 0:
+		return "0.0"
+	case f == -1:
+		return "-1.0"
+	case math.IsNaN(f):
+		return "NaN"
+	case math.IsInf(f, +1):
+		return "+Inf"
+	case math.IsInf(f, -1):
+		return "-Inf"
+	}
+	s := fmt.Sprint(f)
+	if strings.ContainsAny(s, "e.") {
+		return s
+	}
+	return s + ".0"
+}
+
+// isNativeHistogram returns false iff the provided histogram has no sparse
+// buckets and a zero threshold of 0 and a zero count of 0. In principle, this
+// could still be meant to be a native histogram (with a zero threshold of 0 and
+// no observations yet), but for now, we'll treat this case as a conventional
+// histogram.
+//
+// TODO(beorn7): In the final format, there should be an unambiguous way of
+// deciding if a histogram should be ingested as a conventional one or a native
+// one.
+func isNativeHistogram(h *dto.Histogram) bool { + return len(h.GetNegativeDelta()) > 0 || + len(h.GetPositiveDelta()) > 0 || + h.GetZeroCount() > 0 || + h.GetZeroThreshold() > 0 +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/buf.yaml b/vendor/github.com/prometheus/prometheus/prompb/buf.yaml index acd7af9cf..0f7ec1340 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/buf.yaml +++ b/vendor/github.com/prometheus/prometheus/prompb/buf.yaml @@ -5,14 +5,17 @@ lint: ENUM_VALUE_PREFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto ENUM_ZERO_VALUE_SUFFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto PACKAGE_DIRECTORY_MATCH: - remote.proto - types.proto PACKAGE_VERSION_SUFFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto deps: - buf.build/gogo/protobuf diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go index 0b3820c4d..4b07187bd 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ b/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -13,5 +13,22 @@ package prompb +import ( + "sync" +) + func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } + +func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { + size := r.Size() + data, ok := p.Get().(*[]byte) + if ok && cap(*data) >= size { + n, err := r.MarshalToSizedBuffer((*data)[:size]) + if err != nil { + return nil, err + } + return (*data)[:n], nil + } + return r.Marshal() +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go new file mode 100644 index 000000000..3e4bc7df8 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -0,0 +1,3997 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/client/metrics.proto + +package io_prometheus_client + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". + MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} + +type LabelPair struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Gauge struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return m.Size() +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Counter struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) 
Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return m.Size() +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil { + return m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Summary struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return m.Size() +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return m.Size() +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Histogram struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema int32 `protobuf:"zigzag32,5,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` + ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. 
+ NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil { + return m.SampleCountFloat + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil { + return m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil { + return m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) 
GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +type Bucket struct { + CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"` + CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"` + UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return m.Size() +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil { + return m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil { + return m.CumulativeCountFloat + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil { + return m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). 
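+// An illustrative reading (not from the upstream .proto, assuming the usual
+// Prometheus conventions: the first span's offset is the starting bucket
+// index, and each later offset is the gap from the end of the previous
+// span): the spans {offset: 2, length: 3} and {offset: 1, length: 2} select
+// bucket indexes 2, 3, 4 and then, after one unpopulated index, 6 and 7.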
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +type Exemplar struct { + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{9} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" 
json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"` + TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{10} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` + Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{11} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return m.Size() +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil { + return m.Name + } + return "" 
+} + +func (m *MetricFamily) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil { + return m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 923 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12, + 0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26, + 0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, + 0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f, + 0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80, + 0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27, + 0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58, + 0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41, + 0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7, + 0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5, + 0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50, + 0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68, + 0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91, + 0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf, + 0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94, + 0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36, + 0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03, + 0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1, + 0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4, + 0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 
0x61, + 0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1, + 0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac, + 0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81, + 0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76, + 0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13, + 0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08, + 0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d, + 0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29, + 0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18, + 0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f, + 0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c, + 0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95, + 0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8, + 0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95, + 0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22, + 0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5, + 0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49, + 0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a, + 0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53, + 0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5, + 0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae, + 0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8, + 0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a, + 0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26, + 0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c, + 0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c, + 0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87, + 0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60, + 0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b, + 0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96, + 0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c, + 0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef, + 0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1, + 0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a, + 0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f, + 0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00, + 0x00, 0xff, 0xff, 
0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00, +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Gauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Quantile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quantile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - 
i, nil +} + +func (m *Summary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Quantile) > 0 { + for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Quantile[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Untyped) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Untyped) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Untyped) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PositiveCount) > 0 { + for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- { + f2 := math.Float64bits(float64(m.PositiveCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8)) + i-- + dAtA[i] = 0x72 + } + if len(m.PositiveDelta) > 0 { + var j3 int + dAtA5 := make([]byte, len(m.PositiveDelta)*10) + for _, num := range m.PositiveDelta { + x4 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x4 >= 1<<7 { + dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) + j3++ + x4 >>= 7 + } + dAtA5[j3] = uint8(x4) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA5[:j3]) + i = encodeVarintMetrics(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveSpan) > 0 { + for iNdEx := len(m.PositiveSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.NegativeCount) > 0 { + for 
iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.NegativeCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8)) + i-- + dAtA[i] = 0x5a + } + if len(m.NegativeDelta) > 0 { + var j7 int + dAtA9 := make([]byte, len(m.NegativeDelta)*10) + for _, num := range m.NegativeDelta { + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 + } + dAtA9[j7] = uint8(x8) + j7++ + } + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintMetrics(dAtA, i, uint64(j7)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeSpan) > 0 { + for iNdEx := len(m.NegativeSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ZeroCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x41 + } + if m.ZeroCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ZeroCount)) + i-- + dAtA[i] = 0x38 + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x31 + } + if m.Schema != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x28 + } + if m.SampleCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if len(m.Bucket) > 0 { + for iNdEx := len(m.Bucket) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bucket[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CumulativeCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CumulativeCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UpperBound != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.UpperBound)))) + i-- + dAtA[i] = 0x11 + } + if m.CumulativeCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.CumulativeCount)) + i-- + 
dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != nil { + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.TimestampMs != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x30 + } + if m.Untyped != nil { + { + size, err := m.Untyped.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Summary != nil { + { + size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Counter != nil { + { + size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Gauge != nil { + { + size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricFamily) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricFamily) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Type != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Gauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Counter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Quantile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Quantile) > 0 { + for _, e := range m.Quantile { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Untyped) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Bucket) > 0 { + for _, e := range m.Bucket { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.SampleCountFloat != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozMetrics(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != 0 { + n += 1 + sovMetrics(uint64(m.ZeroCount)) + } + if m.ZeroCountFloat != 0 { + n += 9 + } + if len(m.NegativeSpan) > 0 { + for _, e := range m.NegativeSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.NegativeDelta) > 0 { + l = 0 + for _, e := range m.NegativeDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.NegativeCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.NegativeCount)*8)) + len(m.NegativeCount)*8 + } + if len(m.PositiveSpan) > 0 { + for _, e := range m.PositiveSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.PositiveDelta) > 0 { + l = 0 + for _, e := range m.PositiveDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.PositiveCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Bucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CumulativeCount != 0 { + n += 1 + sovMetrics(uint64(m.CumulativeCount)) + } + if m.UpperBound != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CumulativeCountFloat != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozMetrics(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovMetrics(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Gauge != nil { + l = m.Gauge.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Counter != nil { + l = m.Counter.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Summary != nil { + l = m.Summary.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Untyped != nil { + l = m.Untyped.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.TimestampMs != 0 { + n += 1 + sovMetrics(uint64(m.TimestampMs)) + } + if m.Histogram != nil { + l = m.Histogram.Size() + n += 1 + l + 
sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MetricFamily) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovMetrics(uint64(m.Type)) + } + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
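+ // Unknown field numbers are preserved rather than dropped: skipMetrics
+ // measured the encoded size of the field (skippy), the append above
+ // stashed its raw bytes in XXX_unrecognized, and the index now advances
+ // past it, so unknown fields survive an Unmarshal/Marshal round trip.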
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Quantile = append(m.Quantile, Quantile{}) + if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Untyped) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Untyped: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Untyped: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = append(m.Bucket, Bucket{}) + if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleCountFloat = float64(math.Float64frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType) + } + m.ZeroCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.ZeroCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCountFloat = float64(math.Float64frombits(v)) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpan = append(m.NegativeSpan, BucketSpan{}) + if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDelta) == 0 { + m.NegativeDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDelta", wireType) + } + case 11: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCount) == 0 { + m.NegativeCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCount", wireType) + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpan = append(m.PositiveSpan, BucketSpan{}) + if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDelta) == 0 { + m.PositiveDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDelta", wireType) + } + case 14: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen 
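For repeated numeric fields such as NegativeCount and PositiveCount, the decoder accepts both one unpacked element (wire type 1) and a packed run (wire type 2). The preallocation trick visible here: a packed fixed64 run holds packedLen/8 elements, while for packed varints the code counts bytes with the continuation bit clear, since exactly one such byte ends each varint. A sketch of that counting (not part of the patch; helper name is ours):

```go
package main

import "fmt"

// countVarints returns how many varints are packed into b: each varint
// ends with exactly one byte whose continuation (high) bit is clear,
// so counting bytes below 0x80 counts elements.
func countVarints(b []byte) int {
	n := 0
	for _, c := range b {
		if c < 0x80 {
			n++
		}
	}
	return n
}

func main() {
	// Three packed varints: 1, then 300 (encoded 0xAC 0x02), then 2.
	packed := []byte{0x01, 0xAC, 0x02, 0x02}
	fmt.Println(countVarints(packed)) // 3

	// For a packed fixed64 run (repeated double), the element count is
	// simply the payload length divided by 8, as the generated code does.
	const packedLen = 24 // e.g. three doubles
	fmt.Println(packedLen / 8) // 3
}
```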
+ if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCount) == 0 { + m.PositiveCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Bucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Bucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCount", wireType) + } + m.CumulativeCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CumulativeCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field UpperBound", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.UpperBound = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.CumulativeCountFloat = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := 
skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
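BucketSpan.Offset and the delta slices are sint32/sint64 fields, so the decoder zigzag-decodes them: the mapping sends 0, -1, 1, -2, ... to 0, 1, 2, 3, ..., which keeps varints for small negative numbers short. A round trip under that scheme (the encoder is ours; the decode expression mirrors the one in the generated code):

```go
package main

import "fmt"

// zigzag64Encode maps ..., -2, -1, 0, 1, 2, ... to ..., 3, 1, 0, 2, 4, ...
func zigzag64Encode(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

// zigzag64Decode is the inverse, written exactly as in the generated
// Unmarshal methods: shift out the sign bit, then sign-extend it back.
func zigzag64Decode(u uint64) int64 {
	return int64((u >> 1) ^ uint64((int64(u&1)<<63)>>63))
}

func main() {
	for _, v := range []int64{0, -1, 1, -64, 63} {
		u := zigzag64Encode(v)
		fmt.Printf("%3d -> %3d -> %3d\n", v, u, zigzag64Decode(u))
	}
	// 0->0, -1->1, 1->2, -64->127, 63->126; each decodes back exactly.
}
```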
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &types.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
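Embedded messages such as Exemplar.Timestamp arrive as wire type 2: a varint byte length followed by the nested message's own encoding, which the generated code slices out and hands to the child's Unmarshal. A toy version of that slicing (not part of the patch; helper name is ours):

```go
package main

import (
	"fmt"
	"io"
)

// readLengthDelimited slices one length-delimited payload out of b
// starting at off, returning the payload and the index just past it.
func readLengthDelimited(b []byte, off int) ([]byte, int, error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, fmt.Errorf("varint overflows")
		}
		if off >= len(b) {
			return nil, 0, io.ErrUnexpectedEOF
		}
		c := b[off]
		off++
		msglen |= int(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	post := off + msglen
	if msglen < 0 || post > len(b) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return b[off:post], post, nil
}

func main() {
	// A 3-byte nested payload preceded by its varint length; the 0xFF
	// afterwards belongs to whatever field comes next.
	buf := []byte{0x03, 0xAA, 0xBB, 0xCC, 0xFF}
	payload, next, err := readLengthDelimited(buf, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% X next=%d\n", payload, next) // AA BB CC next=4
	// The generated code would now call the child's Unmarshal on
	// payload, e.g. m.Timestamp.Unmarshal(payload), and resume at next.
}
```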
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := 
m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricFamily) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
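Fields the decoder does not recognize are not dropped: skipMetrics (defined just below) measures them, and the raw bytes are appended to XXX_unrecognized so a later re-marshal round-trips data written against a newer schema. The per-wire-type skip lengths, distilled into a sketch (not part of the patch; helper name is ours):

```go
package main

import "fmt"

// skipField returns the byte length of one field's payload (not
// counting its key) for the three scalar wire types, mirroring the
// switch in skipMetrics.
func skipField(wireType int, b []byte) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until one has the high bit clear
		for i, c := range b {
			if c < 0x80 {
				return i + 1, nil
			}
		}
		return 0, fmt.Errorf("truncated varint")
	case 1: // fixed64
		return 8, nil
	case 5: // fixed32
		return 4, nil
	}
	return 0, fmt.Errorf("unsupported wire type %d", wireType)
}

func main() {
	n, _ := skipField(0, []byte{0xAC, 0x02}) // varint 300
	fmt.Println(n)                           // 2
	n, _ = skipField(1, nil)
	fmt.Println(n) // 8
}
```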
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipMetrics(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthMetrics
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupMetrics
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthMetrics
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthMetrics        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowMetrics          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto
new file mode 100644
index 000000000..6bbea622f
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto
@@ -0,0 +1,147 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is copied and lightly edited from
+// github.com/prometheus/client_model/io/prometheus/client/metrics.proto
+// and finally converted to proto3 syntax to make it usable for the
+// gogo-protobuf approach taken within prometheus/prometheus.
+
+syntax = "proto3";
+
+package io.prometheus.client;
+option go_package = "io_prometheus_client";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+message LabelPair {
+  string name = 1;
+  string value = 2;
+}
+
+enum MetricType {
+  // COUNTER must use the Metric field "counter".
+  COUNTER = 0;
+  // GAUGE must use the Metric field "gauge".
+  GAUGE = 1;
+  // SUMMARY must use the Metric field "summary".
+  SUMMARY = 2;
+  // UNTYPED must use the Metric field "untyped".
+  UNTYPED = 3;
+  // HISTOGRAM must use the Metric field "histogram".
+  HISTOGRAM = 4;
+  // GAUGE_HISTOGRAM must use the Metric field "histogram".
+  GAUGE_HISTOGRAM = 5;
+}
+
+message Gauge {
+  double value = 1;
+}
+
+message Counter {
+  double value = 1;
+  Exemplar exemplar = 2;
+}
+
+message Quantile {
+  double quantile = 1;
+  double value = 2;
+}
+
+message Summary {
+  uint64 sample_count = 1;
+  double sample_sum = 2;
+  repeated Quantile quantile = 3 [(gogoproto.nullable) = false];
+}
+
+message Untyped {
+  double value = 1;
+}
+
+message Histogram {
+  uint64 sample_count = 1;
+  double sample_count_float = 4; // Overrides sample_count if > 0.
+  double sample_sum = 2;
+  // Buckets for the conventional histogram.
+  repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+
+  // Everything below here is for native histograms (also known as sparse histograms).
+  // Native histograms are an experimental feature without stability guarantees.
+
+  // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+  // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+  // then each power of two is divided into 2^n logarithmic buckets.
+  // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+  // In the future, more bucket schemas may be added using numbers < -4 or > 8.
+  sint32 schema = 5;
+  double zero_threshold = 6; // Breadth of the zero bucket.
+  uint64 zero_count = 7; // Count in zero bucket.
+  double zero_count_float = 8; // Overrides zero_count if > 0.
+
+  // Negative buckets for the native histogram.
+  repeated BucketSpan negative_span = 9 [(gogoproto.nullable) = false];
+  // Use either "negative_delta" or "negative_count", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_count = 11; // Absolute count of each bucket.
+
+  // Positive buckets for the native histogram.
+  repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false];
+  // Use either "positive_delta" or "positive_count", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_count = 14; // Absolute count of each bucket.
+}
+
+message Bucket {
+  uint64 cumulative_count = 1; // Cumulative in increasing order.
+  double cumulative_count_float = 4; // Overrides cumulative_count if > 0.
+  double upper_bound = 2; // Inclusive.
+  Exemplar exemplar = 3;
+}
+
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+message BucketSpan {
+  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  uint32 length = 2; // Length of consecutive buckets.
+}
+
+message Exemplar {
+  repeated LabelPair label = 1 [(gogoproto.nullable) = false];
+  double value = 2;
+  google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
+}
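The schema and span comments above are terse, so a worked example may help: with schema n, bucket index i has upper bound 2^(i * 2^-n), and spans plus deltas enumerate which bucket indexes are populated and with what counts. A sketch under that reading (local stand-in types and names, not the generated ones):

```go
package main

import (
	"fmt"
	"math"
)

type bucketSpan struct {
	offset int32  // gap to the previous span (or start, for the first)
	length uint32 // number of consecutive buckets
}

// upperBound returns the upper bound of bucket index i for the given
// schema: boundaries grow by a factor of 2^(2^-schema) per bucket.
func upperBound(schema, i int32) float64 {
	return math.Pow(2, float64(i)*math.Pow(2, -float64(schema)))
}

// expand turns spans plus per-bucket count deltas into absolute
// (index, count) pairs, cumulating deltas as the comments describe.
func expand(spans []bucketSpan, deltas []int64) map[int32]int64 {
	out := map[int32]int64{}
	idx := int32(0)
	var count int64
	d := 0
	for _, s := range spans {
		idx += s.offset
		for j := uint32(0); j < s.length; j++ {
			count += deltas[d]
			d++
			out[idx] = count
			idx++
		}
	}
	return out
}

func main() {
	// schema=3: each power of two is divided into 2^3 = 8 buckets, so
	// consecutive bounds differ by a factor of 2^(1/8) ~ 1.0905.
	fmt.Printf("%.4f %.4f\n", upperBound(3, 1), upperBound(3, 8)) // 1.0905 2.0000

	// Two spans: buckets 1..2, then (after a gap of 3) buckets 6..7,
	// with deltas 2,+1,-2,+3 -> absolute counts 2,3,1,4.
	spans := []bucketSpan{{offset: 1, length: 2}, {offset: 3, length: 2}}
	fmt.Println(expand(spans, []int64{2, 1, -2, 3}))
	// map[1:2 2:3 6:1 7:4]
}
```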
+
+message Metric {
+  repeated LabelPair label = 1 [(gogoproto.nullable) = false];
+  Gauge gauge = 2;
+  Counter counter = 3;
+  Summary summary = 4;
+  Untyped untyped = 5;
+  Histogram histogram = 7;
+  int64 timestamp_ms = 6;
+}
+
+message MetricFamily {
+  string name = 1;
+  string help = 2;
+  MetricType type = 3;
+  repeated Metric metric = 4 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go
index b3cf44884..19318878d 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go
@@ -34,8 +34,10 @@ const (
 	// Content-Type: "application/x-protobuf"
 	// Content-Encoding: "snappy"
 	ReadRequest_SAMPLES ReadRequest_ResponseType = 0
-	// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
-	// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
+	// Server will stream a delimited ChunkedReadResponse message that
+	// contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+	// Each message follows a varint size and a fixed-size big-endian
+	// uint32 CRC32 Castagnoli checksum.
 	//
 	// Response headers:
 	// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/vendor/github.com/prometheus/prometheus/prompb/remote.proto
index 70c6dd3fb..b4f82f5f9 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/remote.proto
+++ b/vendor/github.com/prometheus/prometheus/prompb/remote.proto
@@ -39,8 +39,10 @@ message ReadRequest {
   // Content-Type: "application/x-protobuf"
   // Content-Encoding: "snappy"
   SAMPLES = 0;
-  // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
-  // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
+  // Server will stream a delimited ChunkedReadResponse message that
+  // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+  // Each message follows a varint size and a fixed-size big-endian
+  // uint32 CRC32 Castagnoli checksum.
   //
   // Response headers:
   // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
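The comment amended in both hunks above describes the streamed remote-read framing only in passing. A hedged sketch of a frame reader, assuming the layout used by Prometheus' chunked remote-read stream is varint size, then a 4-byte big-endian CRC32-Castagnoli of the payload, then the payload itself (the helper name is ours, not an API of the package):

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// readFrame reads one length-prefixed, checksummed message frame.
func readFrame(r *bufio.Reader) ([]byte, error) {
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	var crcBuf [4]byte
	if _, err := io.ReadFull(r, crcBuf[:]); err != nil {
		return nil, err
	}
	data := make([]byte, size)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, err
	}
	if crc32.Checksum(data, castagnoli) != binary.BigEndian.Uint32(crcBuf[:]) {
		return nil, fmt.Errorf("chunked frame: checksum mismatch")
	}
	return data, nil
}

func main() {
	// Build one frame by hand and read it back.
	payload := []byte("hello")
	var buf bytes.Buffer
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(payload)))
	buf.Write(lenBuf[:n])
	binary.Write(&buf, binary.BigEndian, crc32.Checksum(payload, castagnoli))
	buf.Write(payload)

	got, err := readFrame(bufio.NewReader(&buf))
	fmt.Printf("%s %v\n", got, err) // hello <nil>
}
```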
diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go
index e91dd55c4..125f868e9 100644
--- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go
+++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go
@@ -68,6 +68,37 @@ func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_d938547f84707355, []int{0, 0}
 }
 
+type Histogram_ResetHint int32
+
+const (
+	Histogram_UNKNOWN Histogram_ResetHint = 0
+	Histogram_YES     Histogram_ResetHint = 1
+	Histogram_NO      Histogram_ResetHint = 2
+	Histogram_GAUGE   Histogram_ResetHint = 3
+)
+
+var Histogram_ResetHint_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "YES",
+	2: "NO",
+	3: "GAUGE",
+}
+
+var Histogram_ResetHint_value = map[string]int32{
+	"UNKNOWN": 0,
+	"YES":     1,
+	"NO":      2,
+	"GAUGE":   3,
+}
+
+func (x Histogram_ResetHint) String() string {
+	return proto.EnumName(Histogram_ResetHint_name, int32(x))
+}
+
+func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d938547f84707355, []int{3, 0}
+}
+
 type LabelMatcher_Type int32
 
 const (
@@ -96,25 +127,31 @@ func (x LabelMatcher_Type) String() string {
 }
 
 func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d938547f84707355, []int{6, 0}
+	return fileDescriptor_d938547f84707355, []int{8, 0}
 }
 
 // We require this to match chunkenc.Encoding.
 type Chunk_Encoding int32
 
 const (
-	Chunk_UNKNOWN Chunk_Encoding = 0
-	Chunk_XOR     Chunk_Encoding = 1
+	Chunk_UNKNOWN         Chunk_Encoding = 0
+	Chunk_XOR             Chunk_Encoding = 1
+	Chunk_HISTOGRAM       Chunk_Encoding = 2
+	Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3
 )
 
 var Chunk_Encoding_name = map[int32]string{
 	0: "UNKNOWN",
 	1: "XOR",
+	2: "HISTOGRAM",
+	3: "FLOAT_HISTOGRAM",
 }
 
 var Chunk_Encoding_value = map[string]int32{
-	"UNKNOWN": 0,
-	"XOR":     1,
+	"UNKNOWN":         0,
+	"XOR":             1,
+	"HISTOGRAM":       2,
+	"FLOAT_HISTOGRAM": 3,
 }
 
 func (x Chunk_Encoding) String() string {
@@ -122,7 +159,7 @@ func (x Chunk_Encoding) String() string {
 }
 
 func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d938547f84707355, []int{8, 0}
+	return fileDescriptor_d938547f84707355, []int{10, 0}
 }
 
 type MetricMetadata struct {
@@ -321,23 +358,324 @@ func (m *Exemplar) GetTimestamp() int64 {
 	return 0
 }
 
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both the usual
+// integer histogram and a float histogram.
+type Histogram struct {
+	// Types that are valid to be assigned to Count:
+	//
+	//	*Histogram_CountInt
+	//	*Histogram_CountFloat
+	Count isHistogram_Count `protobuf_oneof:"count"`
+	Sum   float64           `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
+	// The schema defines the bucket schema. Currently, valid numbers
+	// are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+	// is a bucket boundary in each case, and then each power of two is
+	// divided into 2^n logarithmic buckets. Or in other words, each
+	// bucket boundary is the previous boundary times 2^(2^-n). In the
+	// future, more bucket schemas may be added using numbers < -4 or >
+	// 8.
+ Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. 
+ Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) 
GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + // TimeSeries represents samples and labels for a single time series. type TimeSeries struct { // For a timeseries to be valid, and for the samples and exemplars // to be ingested by the remote system properly, the labels field is required. 
- Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{5} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,6 +725,13 @@ func (m *TimeSeries) GetExemplars() []Exemplar { return nil } +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + type Label struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -399,7 +744,7 @@ func (m *Label) Reset() { *m = Label{} } func (m *Label) String() string { return proto.CompactTextString(m) } func (*Label) ProtoMessage() {} func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} + return fileDescriptor_d938547f84707355, []int{6} } func (m *Label) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -453,7 +798,7 @@ func (m *Labels) Reset() { *m = Labels{} } func (m *Labels) String() string { return proto.CompactTextString(m) } func (*Labels) ProtoMessage() {} func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{7} } func (m *Labels) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -503,7 +848,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} + return fileDescriptor_d938547f84707355, []int{8} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -570,7 +915,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7} + return fileDescriptor_d938547f84707355, []int{9} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,7 +1009,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8} + return fileDescriptor_d938547f84707355, []int{10} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-736,7 +1081,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{9} + return fileDescriptor_d938547f84707355, []int{11} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,11 +1126,14 @@ func (m *ChunkedSeries) GetChunks() []Chunk { func init() { proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus.Sample") proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Labels)(nil), "prometheus.Labels") @@ -798,53 +1146,76 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 734 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xdb, 0x46, - 0x14, 0xf5, 0xf0, 0x29, 0x5e, 0xd9, 0x02, 0x3d, 0x50, 0x51, 0xd6, 0x68, 0x55, 0x81, 0x40, 0x01, - 0x2d, 0x0a, 0x19, 0x76, 0x37, 0x35, 0xd0, 0x8d, 0x6c, 0xd0, 0x0f, 0xd4, 0x94, 0xe0, 0x91, 0x84, - 0x3e, 0x36, 0xc2, 0x48, 0x1a, 0x4b, 0x44, 0xc4, 0x47, 0x38, 0x54, 0x60, 0x7d, 0x48, 0x76, 0xf9, - 0x83, 0x20, 0x8b, 0xfc, 0x85, 0x97, 0xf9, 0x82, 0x20, 0xf0, 0x97, 0x04, 0x33, 0xa4, 0x4c, 0x29, - 0x4e, 0x16, 0xce, 0xee, 0xde, 0x7b, 0xce, 0xb9, 0x8f, 0xb9, 0x97, 0x84, 0x6a, 0xb6, 0x4a, 0x18, - 0x6f, 0x27, 0x69, 0x9c, 0xc5, 0x18, 0x92, 0x34, 0x0e, 0x59, 0x36, 0x67, 0x4b, 0x7e, 0x50, 0x9f, - 0xc5, 0xb3, 0x58, 0x86, 0x0f, 0x85, 0x95, 0x33, 0xdc, 0x37, 0x0a, 0xd4, 0x7c, 0x96, 0xa5, 0xc1, - 0xc4, 0x67, 0x19, 0x9d, 0xd2, 0x8c, 0xe2, 0x13, 0xd0, 0x44, 0x0e, 0x07, 0x35, 0x51, 0xab, 0x76, - 0xfc, 0x5b, 0xbb, 0xcc, 0xd1, 0xde, 0x66, 0x16, 0xee, 0x60, 0x95, 0x30, 0x22, 0x25, 0xf8, 0x77, - 0xc0, 0xa1, 0x8c, 0x8d, 0x6e, 0x69, 0x18, 0x2c, 0x56, 0xa3, 0x88, 0x86, 0xcc, 0x51, 0x9a, 0xa8, - 0x65, 0x11, 0x3b, 0x47, 0xce, 0x25, 0xd0, 0xa5, 0x21, 0xc3, 0x18, 0xb4, 0x39, 0x5b, 0x24, 0x8e, - 0x26, 0x71, 0x69, 0x8b, 0xd8, 0x32, 0x0a, 0x32, 0x47, 0xcf, 0x63, 0xc2, 0x76, 0x57, 0x00, 0x65, - 0x25, 0x5c, 0x05, 0x73, 0xd8, 0xfd, 0xbb, 0xdb, 0xfb, 0xa7, 0x6b, 0xef, 0x08, 0xe7, 0xac, 0x37, - 0xec, 0x0e, 0x3c, 0x62, 0x23, 0x6c, 0x81, 0x7e, 0xd1, 0x19, 0x5e, 0x78, 0xb6, 0x82, 0xf7, 0xc0, - 0xba, 0xbc, 0xea, 0x0f, 0x7a, 0x17, 0xa4, 0xe3, 0xdb, 0x2a, 0xc6, 0x50, 0x93, 0x48, 0x19, 0xd3, - 0x84, 0xb4, 0x3f, 0xf4, 0xfd, 0x0e, 0xf9, 0xcf, 0xd6, 0x71, 0x05, 0xb4, 0xab, 0xee, 0x79, 0xcf, - 0x36, 0xf0, 0x2e, 0x54, 0xfa, 0x83, 0xce, 0xc0, 0xeb, 0x7b, 0x03, 0xdb, 0x74, 0xff, 0x02, 0xa3, - 0x4f, 0xc3, 0x64, 0xc1, 0x70, 0x1d, 0xf4, 0x57, 0x74, 0xb1, 0xcc, 0x9f, 0x05, 0x91, 0xdc, 0xc1, - 0x3f, 0x83, 0x95, 0x05, 0x21, 0xe3, 0x19, 0x0d, 
0x13, 0x39, 0xa7, 0x4a, 0xca, 0x80, 0x1b, 0x43, - 0xc5, 0xbb, 0x63, 0x61, 0xb2, 0xa0, 0x29, 0x3e, 0x04, 0x63, 0x41, 0xc7, 0x6c, 0xc1, 0x1d, 0xd4, - 0x54, 0x5b, 0xd5, 0xe3, 0xfd, 0xcd, 0x77, 0xbd, 0x16, 0xc8, 0xa9, 0x76, 0xff, 0xf1, 0xd7, 0x1d, - 0x52, 0xd0, 0xca, 0x82, 0xca, 0x37, 0x0b, 0xaa, 0x5f, 0x16, 0x7c, 0x8b, 0x00, 0x06, 0x41, 0xc8, - 0xfa, 0x2c, 0x0d, 0x18, 0x7f, 0x7e, 0xcd, 0x63, 0x30, 0xb9, 0x1c, 0x97, 0x3b, 0x8a, 0x54, 0xe0, - 0x4d, 0x45, 0xfe, 0x12, 0x85, 0x64, 0x4d, 0xc4, 0x7f, 0x82, 0xc5, 0x8a, 0x21, 0xb9, 0xa3, 0x4a, - 0x55, 0x7d, 0x53, 0xb5, 0x7e, 0x81, 0x42, 0x57, 0x92, 0xdd, 0x23, 0xd0, 0x65, 0x13, 0x62, 0xe9, - 0xf2, 0x50, 0x50, 0xbe, 0x74, 0x61, 0x6f, 0x8f, 0x6f, 0x15, 0xe3, 0xbb, 0x27, 0x60, 0x5c, 0xe7, - 0xad, 0x3e, 0x77, 0x36, 0xf7, 0x35, 0x82, 0x5d, 0x19, 0xf7, 0x69, 0x36, 0x99, 0xb3, 0x14, 0x1f, - 0x6d, 0xdd, 0xf9, 0x2f, 0x4f, 0xf4, 0x05, 0xaf, 0xbd, 0x71, 0xdf, 0xeb, 0x46, 0x95, 0xaf, 0x35, - 0xaa, 0x6e, 0x36, 0xda, 0x02, 0x4d, 0x5e, 0xab, 0x01, 0x8a, 0x77, 0x63, 0xef, 0x60, 0x13, 0xd4, - 0xae, 0x77, 0x63, 0x23, 0x11, 0x20, 0xe2, 0x42, 0x45, 0x80, 0x78, 0xb6, 0xea, 0xbe, 0x47, 0x60, - 0x11, 0x46, 0xa7, 0x97, 0x41, 0x94, 0x71, 0xfc, 0x23, 0x98, 0x3c, 0x63, 0xc9, 0x28, 0xe4, 0xb2, - 0x2f, 0x95, 0x18, 0xc2, 0xf5, 0xb9, 0x28, 0x7d, 0xbb, 0x8c, 0x26, 0xeb, 0xd2, 0xc2, 0xc6, 0x3f, - 0x41, 0x85, 0x67, 0x34, 0xcd, 0x04, 0x3b, 0xbf, 0x05, 0x53, 0xfa, 0x3e, 0xc7, 0x3f, 0x80, 0xc1, - 0xa2, 0xa9, 0x00, 0x34, 0x09, 0xe8, 0x2c, 0x9a, 0xfa, 0x1c, 0x1f, 0x40, 0x65, 0x96, 0xc6, 0xcb, - 0x24, 0x88, 0x66, 0x8e, 0xde, 0x54, 0x5b, 0x16, 0x79, 0xf4, 0x71, 0x0d, 0x94, 0xf1, 0xca, 0x31, - 0x9a, 0xa8, 0x55, 0x21, 0xca, 0x78, 0x25, 0xb2, 0xa7, 0x34, 0x9a, 0x31, 0x91, 0xc4, 0xcc, 0xb3, - 0x4b, 0xdf, 0xe7, 0xee, 0x3b, 0x04, 0xfa, 0xd9, 0x7c, 0x19, 0xbd, 0xc0, 0x0d, 0xa8, 0x86, 0x41, - 0x34, 0x12, 0x27, 0x58, 0xf6, 0x6c, 0x85, 0x41, 0x24, 0xce, 0xd0, 0xe7, 0x12, 0xa7, 0x77, 0x8f, - 0x78, 0xf1, 0x89, 0x84, 0xf4, 0xae, 0xc0, 0xdb, 0xc5, 0x12, 0x54, 0xb9, 0x84, 0x83, 0xcd, 0x25, - 0xc8, 0x02, 0x6d, 0x2f, 0x9a, 0xc4, 0xd3, 0x20, 0x9a, 0x95, 0x1b, 0x10, 0xbf, 0x1e, 0x39, 0xd5, - 0x2e, 0x91, 0xb6, 0xdb, 0x84, 0xca, 0x9a, 0xb5, 0xfd, 0x77, 0x30, 0x41, 0xfd, 0xb7, 0x47, 0x6c, - 0xe4, 0xbe, 0x84, 0x3d, 0x99, 0x8d, 0x4d, 0xbf, 0xf7, 0xcb, 0x38, 0x04, 0x63, 0x22, 0x32, 0xac, - 0x3f, 0x8c, 0xfd, 0x27, 0x9d, 0xae, 0x05, 0x39, 0xed, 0xb4, 0x7e, 0xff, 0xd0, 0x40, 0x1f, 0x1e, - 0x1a, 0xe8, 0xd3, 0x43, 0x03, 0xfd, 0x6f, 0x08, 0x76, 0x32, 0x1e, 0x1b, 0xf2, 0xaf, 0xfb, 0xc7, - 0xe7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa3, 0x6c, 0x23, 0xa6, 0x05, 0x00, 0x00, + // 1092 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, + 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, + 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, + 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, + 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, + 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, + 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 
0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, + 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, + 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, + 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, + 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, + 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, + 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, + 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, + 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, + 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, + 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, + 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, + 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, + 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, + 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, + 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, + 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, + 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, + 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, + 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, + 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, + 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, + 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, + 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, + 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, + 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, + 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, + 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, + 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, + 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, + 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, + 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, + 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5, + 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, + 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, + 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, + 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 
0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, + 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, + 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, + 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, + 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, + 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, + 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, + 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, + 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, + 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, + 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, + 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, + 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, + 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, + 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, + 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, + 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, + 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, + 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, + 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, + 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, + 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, + 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, + 0x13, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -990,7 +1361,7 @@ func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { +func (m *Histogram) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1000,12 +1371,12 @@ func (m *TimeSeries) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1014,10 +1385,49 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + 
for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1025,13 +1435,42 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x5a } } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1039,48 +1478,239 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x42 } } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err } - i-- - dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *Label) Marshal() (dAtA []byte, err error) { +func (m *Histogram_CountInt) 
MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Label) MarshalTo(dAtA []byte) (int, error) { +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exemplars) > 0 { + for iNdEx := 
len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } if len(m.Value) > 0 { @@ -1444,6 +2074,125 @@ func (m *Exemplar) Size() (n int) { return n } +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 
0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *TimeSeries) Size() (n int) { if m == nil { return 0 @@ -1468,6 +2217,12 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1989,7 +2744,7 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error { } return nil } -func (m *TimeSeries) Unmarshal(dAtA []byte) error { +func (m *Histogram) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2012,17 +2767,17 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) } - var msglen int + var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2032,26 +2787,643 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) } - iNdEx = postIndex + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } 
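+					// Stop with io.ErrUnexpectedEOF if the buffer ends in the
+					// middle of the packed-length varint.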
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
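+			// Advance past the skipped field. Its raw bytes were appended to
+			// XXX_unrecognized above, so unknown fields survive re-marshalling.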
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) @@ -2120,6 +3492,40 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index 6ba7074a5..aa322515c 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -54,13 +54,79 @@ message Exemplar { int64 timestamp = 3; } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. + NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. 
+ } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + // TimeSeries represents samples and labels for a single time series. message TimeSeries { // For a timeseries to be valid, and for the samples and exemplars // to be ingested by the remote system properly, the labels field is required. - repeated Label labels = 1 [(gogoproto.nullable) = false]; - repeated Sample samples = 2 [(gogoproto.nullable) = false]; - repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; } message Label { @@ -103,8 +169,10 @@ message Chunk { // We require this to match chunkenc.Encoding. 
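+// A worked example for the Histogram message above: buckets at indexes 1, 2
+// and 6 holding counts 3, 5 and 4 become the spans
+// [{offset: 1, length: 2}, {offset: 3, length: 1}] (start at index 1, then
+// skip indexes 3-5) with, for an integer histogram, the deltas [3, 2, -1]
+// (3-0, 5-3, 4-5). And per the schema comment, consecutive bucket boundaries
+// differ by a factor of 2^(2^-schema): schema 0 doubles at each boundary,
+// while schema 3 grows by 2^(1/8), roughly 1.09 (8 buckets per power of two).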
enum Encoding { - UNKNOWN = 0; - XOR = 1; + UNKNOWN = 0; + XOR = 1; + HISTOGRAM = 2; + FLOAT_HISTOGRAM = 3; } Encoding type = 3; bytes data = 4; diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index b6842e8f4..ddfb26b13 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -35,12 +35,15 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/stats" ) @@ -197,7 +200,6 @@ func (q *query) Exec(ctx context.Context) *Result { // Exec query. res, warnings, err := q.ng.exec(ctx, q) - return &Result{Err: err, Value: res, Warnings: warnings} } @@ -651,12 +653,13 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval query.sampleStats.InitStepTracking(start, start, 1) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() - var mat Matrix switch result := val.(type) { @@ -676,7 +679,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval for i, s := range mat { // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. - vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}} + vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, H: s.Points[0].H, T: start}} } return vector, warnings, nil case parser.ValueTypeScalar: @@ -702,10 +705,12 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() mat, ok := val.(Matrix) if !ok { @@ -823,6 +828,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS return start, end } +func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration { + var interval time.Duration + for _, node := range path { + switch n := node.(type) { + case *parser.SubqueryExpr: + interval = n.Step + if n.Step == 0 { + interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond + } + } + } + return interval +} + func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. 
// The evaluation of the VectorSelector inside then evaluates the given range and unsets @@ -833,10 +852,14 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { switch n := node.(type) { case *parser.VectorSelector: start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + interval := ng.getLastSubqueryInterval(path) + if interval == 0 { + interval = s.Interval + } hints := &storage.SelectHints{ Start: start, End: end, - Step: durationMilliseconds(s.Interval), + Step: durationMilliseconds(interval), Range: durationMilliseconds(evalRange), Func: extractFuncFromPath(path), } @@ -962,8 +985,10 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error case errWithWarnings: *errp = err.err *ws = append(*ws, err.warnings...) + case error: + *errp = err default: - *errp = e.(error) + *errp = fmt.Errorf("%v", err) } } @@ -992,7 +1017,7 @@ type EvalNodeHelper struct { // Caches. // DropMetricName and label_*. Dmn map[uint64]labels.Labels - // funcHistogramQuantile. + // funcHistogramQuantile for conventional histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets // label_replace. regex *regexp.Regexp @@ -1007,6 +1032,14 @@ type EvalNodeHelper struct { resultMetric map[string]labels.Labels } +func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { + if enh.lb == nil { + enh.lb = labels.NewBuilder(lbls) + } else { + enh.lb.Reset(lbls) + } +} + // DropMetricName is a cached version of DropMetricName. func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { if enh.Dmn == nil { @@ -1243,7 +1276,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.AggregateExpr: // Grouping labels must be sorted (expected both by generateGroupingKey() and aggregation()). sortedGrouping := e.Grouping - sort.Strings(sortedGrouping) + slices.Sort(sortedGrouping) // Prepare a function to initialise series helpers with the grouping key. buf := make([]byte, 0, 1024) @@ -1368,10 +1401,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) + var chkIter chunkenc.Iterator for i, s := range selVS.Series { ev.currentSamples -= len(points) points = points[:0] - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) metric := selVS.Series[i].Labels() // The last_over_time function acts like offset; thus, it // should keep the metric name. For all the other range @@ -1409,7 +1444,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points))) enh.Out = outVec[:0] if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts}) + ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts}) } // Only buffer stepRange milliseconds from the second step on. 
it.ReduceDelta(stepRange) @@ -1540,7 +1575,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.NumberLiteral: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil + return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1553,8 +1588,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } mat := make(Matrix, 0, len(e.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range e.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), Points: getPointSlice(numSteps), @@ -1562,10 +1599,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ - _, v, ok := ev.vectorSelectorSingle(it, e, ts) + _, v, h, ok := ev.vectorSelectorSingle(it, e, ts) if ok { if ev.currentSamples < ev.maxSamples { - ss.Points = append(ss.Points, Point{V: v, T: ts}) + ss.Points = append(ss.Points, Point{V: v, H: h, T: ts}) ev.samplesStats.IncrementSamplesAtStep(step, 1) ev.currentSamples++ } else { @@ -1675,6 +1712,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { mat[i].Points = append(mat[i].Points, Point{ T: ts, V: mat[i].Points[0].V, + H: mat[i].Points[0].H, }) ev.currentSamples++ if ev.currentSamples > ev.maxSamples { @@ -1697,14 +1735,16 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect } vec := make(Vector, 0, len(node.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range node.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) - t, v, ok := ev.vectorSelectorSingle(it, node, ts) + t, v, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { vec = append(vec, Sample{ Metric: node.Series[i].Labels(), - Point: Point{V: v, T: t}, + Point: Point{V: v, H: h, T: t}, }) ev.currentSamples++ @@ -1719,33 +1759,39 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect return vec, ws } -// vectorSelectorSingle evaluates a instant vector for the iterator of one time series. -func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) { +// vectorSelectorSingle evaluates an instant vector for the iterator of one time series. 
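+// It returns the sample's timestamp, its float value, its float histogram
+// (nil unless the sample is a native histogram), and whether a usable,
+// non-stale sample was found within the lookback delta.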
+func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) ( + int64, float64, *histogram.FloatHistogram, bool, +) { refTime := ts - durationMilliseconds(node.Offset) var t int64 var v float64 + var h *histogram.FloatHistogram - ok := it.Seek(refTime) - if !ok { + valueType := it.Seek(refTime) + switch valueType { + case chunkenc.ValNone: if it.Err() != nil { ev.error(it.Err()) } - } - - if ok { + case chunkenc.ValFloat: t, v = it.At() + case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: + t, h = it.AtFloatHistogram() + default: + panic(fmt.Errorf("unknown value type %v", valueType)) } - - if !ok || t > refTime { - t, v, ok = it.PeekPrev() + if valueType == chunkenc.ValNone || t > refTime { + var ok bool + t, v, _, h, ok = it.PeekPrev() if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { - return 0, 0, false + return 0, 0, nil, false } } - if value.IsStaleNaN(v) { - return 0, 0, false + if value.IsStaleNaN(v) || (h != nil && value.IsStaleNaN(h.Sum)) { + return 0, 0, nil, false } - return t, v, true + return t, v, h, true } var pointPool = sync.Pool{} @@ -1780,12 +1826,14 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } + var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { if err := contextDone(ev.ctx, "expression evaluation"); err != nil { ev.error(err) } - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: series[i].Labels(), } @@ -1830,30 +1878,59 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m out = out[:0] } - ok := it.Seek(maxt) - if !ok { + soughtValueType := it.Seek(maxt) + if soughtValueType == chunkenc.ValNone { if it.Err() != nil { ev.error(it.Err()) } } buf := it.Buffer() - for buf.Next() { - t, v := buf.At() - if value.IsStaleNaN(v) { - continue +loop: + for { + switch buf.Next() { + case chunkenc.ValNone: + break loop + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + t, h := buf.AtFloatHistogram() + if value.IsStaleNaN(h.Sum) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples++ + out = append(out, Point{T: t, H: h}) + } + case chunkenc.ValFloat: + t, v := buf.At() + if value.IsStaleNaN(v) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples++ + out = append(out, Point{T: t, V: v}) + } } - // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { + } + // The sought sample might also be in the range. + switch soughtValueType { + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + t, h := it.AtFloatHistogram() + if t == maxt && !value.IsStaleNaN(h.Sum) { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } + out = append(out, Point{T: t, H: h}) ev.currentSamples++ - out = append(out, Point{T: t, V: v}) } - } - // The seeked sample might also be in the range. 
- if ok { + case chunkenc.ValFloat: t, v := it.At() if t == maxt && !value.IsStaleNaN(v) { if ev.currentSamples >= ev.maxSamples { @@ -2011,10 +2088,12 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * // Account for potentially swapped sidedness. vl, vr := ls.V, rs.V + hl, hr := ls.H, rs.H if matching.Card == parser.CardOneToMany { vl, vr = vr, vl + hl, hr = hr, hl } - value, keep := vectorElemBinop(op, vl, vr) + value, histogramValue, keep := vectorElemBinop(op, vl, vr, hl, hr) if returnBool { if keep { value = 1.0 @@ -2049,23 +2128,26 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * insertedSigs[insertSig] = struct{}{} } - enh.Out = append(enh.Out, Sample{ - Metric: metric, - Point: Point{V: value}, - }) + if (hl != nil && hr != nil) || (hl == nil && hr == nil) { + // Both lhs and rhs are of same type. + enh.Out = append(enh.Out, Sample{ + Metric: metric, + Point: Point{V: value, H: histogramValue}, + }) + } } return enh.Out } func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string { if on { - sort.Strings(names) + slices.Sort(names) return func(lset labels.Labels) string { return string(lset.BytesWithLabels(b, names...)) } } names = append([]string{labels.MetricName}, names...) - sort.Strings(names) + slices.Sort(names) return func(lset labels.Labels) string { return string(lset.BytesWithoutLabels(b, names...)) } @@ -2078,12 +2160,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V enh.resultMetric = make(map[string]labels.Labels, len(enh.Out)) } - if enh.lb == nil { - enh.lb = labels.NewBuilder(lhs) - } else { - enh.lb.Reset(lhs) - } - + enh.resetBuilder(lhs) buf := bytes.NewBuffer(enh.lblResultBuf[:0]) enh.lblBuf = lhs.Bytes(enh.lblBuf) buf.Write(enh.lblBuf) @@ -2116,7 +2193,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(nil) + ret := enh.lb.Labels(labels.EmptyLabels()) enh.resultMetric[str] = ret return ret } @@ -2130,7 +2207,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala if swap { lv, rv = rv, lv } - value, keep := vectorElemBinop(op, lv, rv) + value, _, keep := vectorElemBinop(op, lv, rv, nil, nil) // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { @@ -2156,7 +2233,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil) + return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels()) } // scalarBinop evaluates a binary operation between two Scalars. @@ -2193,45 +2270,56 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { switch op { case parser.ADD: - return lhs + rhs, true + if hlhs != nil && hrhs != nil { + // The histogram being added must have the larger schema + // code (i.e. the higher resolution). 
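+			// A larger schema value means finer buckets (boundaries grow by
+			// a factor of 2^(2^-schema)), and the boundaries of a coarser
+			// schema align with those of a finer one, so the finer operand is
+			// merged into a copy of the coarser one and the result keeps the
+			// smaller schema. For instance, adding a schema-3 histogram into
+			// a schema-0 one sums each run of 8 fine buckets into a single
+			// coarse bucket.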
+ if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Add(hrhs), true + } + return 0, hrhs.Copy().Add(hlhs), true + } + return lhs + rhs, nil, true case parser.SUB: - return lhs - rhs, true + return lhs - rhs, nil, true case parser.MUL: - return lhs * rhs, true + return lhs * rhs, nil, true case parser.DIV: - return lhs / rhs, true + return lhs / rhs, nil, true case parser.POW: - return math.Pow(lhs, rhs), true + return math.Pow(lhs, rhs), nil, true case parser.MOD: - return math.Mod(lhs, rhs), true + return math.Mod(lhs, rhs), nil, true case parser.EQLC: - return lhs, lhs == rhs + return lhs, nil, lhs == rhs case parser.NEQ: - return lhs, lhs != rhs + return lhs, nil, lhs != rhs case parser.GTR: - return lhs, lhs > rhs + return lhs, nil, lhs > rhs case parser.LSS: - return lhs, lhs < rhs + return lhs, nil, lhs < rhs case parser.GTE: - return lhs, lhs >= rhs + return lhs, nil, lhs >= rhs case parser.LTE: - return lhs, lhs <= rhs + return lhs, nil, lhs <= rhs case parser.ATAN2: - return math.Atan2(lhs, rhs), true + return math.Atan2(lhs, rhs), nil, true } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } type groupedAggregation struct { - labels labels.Labels - value float64 - mean float64 - groupCount int - heap vectorByValueHeap - reverseHeap vectorByReverseValueHeap + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + labels labels.Labels + value float64 + histogramValue *histogram.FloatHistogram + mean float64 + groupCount int + heap vectorByValueHeap + reverseHeap vectorByReverseValueHeap } // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels @@ -2267,20 +2355,19 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without // operator is less frequently used than other aggregations, we're fine having to // re-compute the grouping key on each step for this case. grouping = append(grouping, valueLabel) - sort.Strings(grouping) + slices.Sort(grouping) recomputeGroupingKey = true } } - lb := labels.NewBuilder(nil) var buf []byte for si, s := range vec { metric := s.Metric if op == parser.COUNT_VALUES { - lb.Reset(metric) - lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = lb.Labels(nil) + enh.resetBuilder(metric) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) + metric = enh.lb.Labels(labels.EmptyLabels()) // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2297,20 +2384,30 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group, ok := result[groupingKey] // Add a new group if it doesn't exist. if !ok { - lb.Reset(metric) + var m labels.Labels + enh.resetBuilder(metric) if without { - lb.Del(grouping...) - lb.Del(labels.MetricName) + enh.lb.Del(grouping...) + enh.lb.Del(labels.MetricName) + m = enh.lb.Labels(labels.EmptyLabels()) + } else if len(grouping) > 0 { + enh.lb.Keep(grouping...) + m = enh.lb.Labels(labels.EmptyLabels()) } else { - lb.Keep(grouping...) 
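+				// Neither `without` nor any grouping labels were given:
+				// aggregate every series into one group with an empty label set.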
+ m = labels.EmptyLabels() } - m := lb.Labels(nil) newAgg := &groupedAggregation{ labels: m, value: s.V, mean: s.V, groupCount: 1, } + if s.H == nil { + newAgg.hasFloat = true + } else if op == parser.SUM { + newAgg.histogramValue = s.H.Copy() + newAgg.hasHistogram = true + } result[groupingKey] = newAgg orderedResult = append(orderedResult, newAgg) @@ -2345,7 +2442,26 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without switch op { case parser.SUM: - group.value += s.V + if s.H != nil { + group.hasHistogram = true + if group.histogramValue != nil { + // The histogram being added must have + // an equal or larger schema. + if s.H.Schema >= group.histogramValue.Schema { + group.histogramValue.Add(s.H) + } else { + h := s.H.Copy() + h.Add(group.histogramValue) + group.histogramValue = h + } + } + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. + } else { + group.hasFloat = true + group.value += s.V + } case parser.AVG: group.groupCount++ @@ -2479,13 +2595,18 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.QUANTILE: aggr.value = quantile(q, aggr.heap) + case parser.SUM: + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + continue + } default: // For other aggregations, we already have the right value. } enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - Point: Point{V: aggr.value}, + Point: Point{V: aggr.value, H: aggr.histogramValue}, }) } return enh.Out diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index e25f3f119..3da38ea0f 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -24,6 +24,7 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" ) @@ -66,9 +67,11 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] - rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) - rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + samples = vals[0].(Matrix)[0] + rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + resultValue float64 + resultHistogram *histogram.FloatHistogram ) // No sense in trying to compute a rate without at least two points. Drop @@ -77,14 +80,32 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod return enh.Out } - resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V - if isCounter { - var lastValue float64 - for _, sample := range samples.Points { - if sample.V < lastValue { - resultValue += lastValue + if samples.Points[0].H != nil { + resultHistogram = histogramRate(samples.Points, isCounter) + if resultHistogram == nil { + // Points are a mix of floats and histograms, or the histograms + // are not compatible with each other. 
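Either failure mode is reported the same way: histogramRate returns nil and the caller emits no sample at all, rather than a partial or misleading rate.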
+ // TODO(beorn7): find a way of communicating the exact reason + return enh.Out + } + } else { + resultValue = samples.Points[len(samples.Points)-1].V - samples.Points[0].V + prevValue := samples.Points[0].V + // We have to iterate through everything even in the non-counter + // case because we have to check that everything is a float. + // TODO(beorn7): Find a way to check that earlier, e.g. by + // handing in a []FloatPoint and a []HistogramPoint separately. + for _, currPoint := range samples.Points[1:] { + if currPoint.H != nil { + return nil // Range contains a mix of histograms and floats. + } + if !isCounter { + continue + } + if currPoint.V < prevValue { + resultValue += prevValue } - lastValue = sample.V + prevValue = currPoint.V } } @@ -95,6 +116,7 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + // TODO(beorn7): Do this for histograms, too. if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { // Counters cannot be negative. If we have any slope at // all (i.e. resultValue went up), we can extrapolate @@ -126,16 +148,72 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod } else { extrapolateToInterval += averageDurationBetweenSamples / 2 } - resultValue = resultValue * (extrapolateToInterval / sampledInterval) + factor := extrapolateToInterval / sampledInterval if isRate { - resultValue = resultValue / ms.Range.Seconds() + factor /= ms.Range.Seconds() + } + if resultHistogram == nil { + resultValue *= factor + } else { + resultHistogram.Scale(factor) } return append(enh.Out, Sample{ - Point: Point{V: resultValue}, + Point: Point{V: resultValue, H: resultHistogram}, }) } +// histogramRate is a helper function for extrapolatedRate. It requires +// points[0] to be a histogram. It returns nil if any other Point in points is +// not a histogram. +func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram { + prev := points[0].H // We already know that this is a histogram. + last := points[len(points)-1].H + if last == nil { + return nil // Range contains a mix of histograms and floats. + } + minSchema := prev.Schema + if last.Schema < minSchema { + minSchema = last.Schema + } + + // First iteration to find out two things: + // - What's the smallest relevant schema? + // - Are all data points histograms? + // TODO(beorn7): Find a way to check that earlier, e.g. by handing in a + // []FloatPoint and a []HistogramPoint separately. + for _, currPoint := range points[1 : len(points)-1] { + curr := currPoint.H + if curr == nil { + return nil // Range contains a mix of histograms and floats. + } + // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint. + if !isCounter { + continue + } + if curr.Schema < minSchema { + minSchema = curr.Schema + } + } + + h := last.CopyToSchema(minSchema) + h.Sub(prev) + + if isCounter { + // Second iteration to deal with counter resets. 
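Each detected reset means the counter restarted, so the histogram observed just before the reset must be added back to recover the true increase. The float analogue appears earlier in extrapolatedRate; as a worked example with counter samples 5, 9, 2, 4, the raw difference is 4 - 5 = -1, and adding back the pre-reset value 9 gives the actual increase of 8.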
+ for _, currPoint := range points[1:] { + curr := currPoint.H + if curr.DetectReset(prev) { + h.Add(prev) + } + prev = curr + } + } + + h.CounterResetHint = histogram.GaugeType + return h.Compact(0) +} + // === delta(Matrix parser.ValueTypeMatrix) Vector === func funcDelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return extrapolatedRate(vals, args, enh, false, false) @@ -227,7 +305,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode // The trend factor argument. tf := vals[2].(Vector)[0].V - // Sanity check the input. + // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf)) } @@ -793,6 +871,59 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo }) } +// === histogram_count(Vector parser.ValueTypeVector) Vector === +func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: sample.H.Count}, + }) + } + return enh.Out +} + +// === histogram_sum(Vector parser.ValueTypeVector) Vector === +func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: sample.H.Sum}, + }) + } + return enh.Out +} + +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + lower := vals[0].(Vector)[0].V + upper := vals[1].(Vector)[0].V + inVec := vals[2].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: histogramFraction(lower, upper, sample.H)}, + }) + } + return enh.Out +} + // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { q := vals[0].(Vector)[0].V @@ -805,26 +936,57 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev v.buckets = v.buckets[:0] } } - for _, el := range inVec { + + var histogramSamples []Sample + + for _, sample := range inVec { + // We are only looking for conventional buckets here. Remember + // the histograms for later treatment. + if sample.H != nil { + histogramSamples = append(histogramSamples, sample) + continue + } + upperBound, err := strconv.ParseFloat( - el.Metric.Get(model.BucketLabel), 64, + sample.Metric.Get(model.BucketLabel), 64, ) if err != nil { // Oops, no bucket label or malformed label value. Skip. // TODO(beorn7): Issue a warning somehow. continue } - enh.lblBuf = el.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) + enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)] if !ok { - el.Metric = labels.NewBuilder(el.Metric). 
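The el-to-sample rename is mechanical; the only behavioural change in this builder chain is Labels(nil) becoming Labels(labels.EmptyLabels()), matching the new Builder.Labels convention used throughout this patch, apparently because a nil slice is no longer a valid empty labels.Labels value.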
+ sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). - Labels(nil) + Labels(labels.EmptyLabels()) - mb = &metricWithBuckets{el.Metric, nil} + mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } - mb.buckets = append(mb.buckets, bucket{upperBound, el.V}) + mb.buckets = append(mb.buckets, bucket{upperBound, sample.V}) + + } + + // Now deal with the histograms. + for _, sample := range histogramSamples { + // We have to reconstruct the exact same signature as above for + // a conventional histogram, just ignoring any le label. + enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) + if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { + // At this data point, we have conventional histogram + // buckets and a native histogram with the same name and + // labels. Do not evaluate anything. + // TODO(beorn7): Issue a warning somehow. + delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) + continue + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + Point: Point{V: histogramQuantile(q, sample.H)}, + }) } for _, mb := range enh.signatureToMetricWithBuckets { @@ -918,7 +1080,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels(labels.EmptyLabels()) enh.Dmn[h] = outMetric } } @@ -986,7 +1148,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels(labels.EmptyLabels()) enh.Dmn[h] = outMetric } @@ -1103,7 +1265,10 @@ var FunctionCalls = map[string]FunctionCall{ "deriv": funcDeriv, "exp": funcExp, "floor": funcFloor, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, "hour": funcHour, "idelta": funcIdelta, @@ -1221,7 +1386,7 @@ func (s *vectorByReverseValueHeap) Pop() interface{} { // createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched // in a given expression. It is used in the absent functions. func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { - m := labels.Labels{} + b := labels.NewBuilder(labels.EmptyLabels()) var lm []*labels.Matcher switch n := expr.(type) { @@ -1230,25 +1395,26 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { case *parser.MatrixSelector: lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers default: - return m + return labels.EmptyLabels() } - empty := []string{} + // The 'has' map implements backwards-compatibility for historic behaviour: + // e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output. + // Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`. 
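Concretely: a second MatchEqual matcher on a name already present in has takes the else branch below and deletes the label, so absent(x{job="a",job="a"}) drops job from the output even though the two values agree — exactly the historic behaviour the comment above calls arguably wrong.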
+ has := make(map[string]bool, len(lm)) for _, ma := range lm { if ma.Name == labels.MetricName { continue } - if ma.Type == labels.MatchEqual && !m.Has(ma.Name) { - m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil) + if ma.Type == labels.MatchEqual && !has[ma.Name] { + b.Set(ma.Name, ma.Value) + has[ma.Name] = true } else { - empty = append(empty, ma.Name) + b.Del(ma.Name) } } - for _, v := range empty { - m = labels.NewBuilder(m).Del(v).Labels(nil) - } - return m + return b.Labels(labels.EmptyLabels()) } func stringFromArg(e parser.Expr) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index 92afff8b2..450021328 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -163,6 +163,21 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_count": { + Name: "histogram_count", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_sum": { + Name: "histogram_sum", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_fraction": { + Name: "histogram_fraction", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_quantile": { Name: "histogram_quantile", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 433f45259..461e854ac 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -16,13 +16,13 @@ package parser import ( "math" - "sort" "strconv" "time" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) + %} %union { @@ -32,6 +32,7 @@ import ( matcher *labels.Matcher label labels.Label labels labels.Labels + lblList []labels.Label strings []string series []SequenceValue uint uint64 @@ -138,10 +139,9 @@ START_METRIC_SELECTOR // Type definitions for grammar rules. %type label_match_list %type label_matcher - %type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors - -%type label_set label_set_list metric +%type label_set metric +%type label_set_list %type
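The new lblList union member lets label_set_list accumulate a plain []labels.Label while parsing; label_set and metric then presumably build the final immutable labels.Labels from that slice in a single step (e.g. via labels.New) instead of growing a labels.Labels value matcher by matcher.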