diff --git a/core/src/internal_flags.rs b/core/src/internal_flags.rs index 9da20a59a..27d0543d9 100644 --- a/core/src/internal_flags.rs +++ b/core/src/internal_flags.rs @@ -140,6 +140,8 @@ impl InternalFlags { WorkflowTaskCompletedMetadata { core_used_flags: core_newly_used, lang_used_flags: lang_newly_used, + sdk_name: "".to_string(), + sdk_version: "".to_string(), } } Self::Disabled => WorkflowTaskCompletedMetadata::default(), @@ -182,6 +184,8 @@ mod tests { sdk_metadata: Some(WorkflowTaskCompletedMetadata { core_used_flags: vec![1], lang_used_flags: vec![], + sdk_name: "".to_string(), + sdk_version: "".to_string(), }), ..Default::default() }); @@ -215,6 +219,8 @@ mod tests { sdk_metadata: Some(WorkflowTaskCompletedMetadata { core_used_flags: vec![2], lang_used_flags: vec![2], + sdk_name: "".to_string(), + sdk_version: "".to_string(), }), ..Default::default() }); diff --git a/core/src/worker/client/mocks.rs b/core/src/worker/client/mocks.rs index 5af80f286..eb0ac3faa 100644 --- a/core/src/worker/client/mocks.rs +++ b/core/src/worker/client/mocks.rs @@ -18,6 +18,7 @@ pub(crate) static DEFAULT_TEST_CAPABILITIES: &Capabilities = &Capabilities { upsert_memo: true, eager_workflow_start: true, sdk_metadata: true, + count_group_by_execution_status: false, }; #[cfg(test)] diff --git a/sdk-core-protos/protos/api_upstream/.buildkite/Dockerfile b/sdk-core-protos/protos/api_upstream/.buildkite/Dockerfile index 8579a64ca..960774481 100644 --- a/sdk-core-protos/protos/api_upstream/.buildkite/Dockerfile +++ b/sdk-core-protos/protos/api_upstream/.buildkite/Dockerfile @@ -1,2 +1,2 @@ -FROM temporalio/base-ci-builder:1.5.0 -WORKDIR /temporal \ No newline at end of file +FROM temporalio/base-ci-builder:1.10.3 +WORKDIR /temporal diff --git a/sdk-core-protos/protos/api_upstream/.gitignore b/sdk-core-protos/protos/api_upstream/.gitignore index 4c966bedf..af5159473 100644 --- a/sdk-core-protos/protos/api_upstream/.gitignore +++ b/sdk-core-protos/protos/api_upstream/.gitignore @@ -1,3 
+1,4 @@ /.idea /.gen -/.vscode \ No newline at end of file +/.vscode +/.stamp diff --git a/sdk-core-protos/protos/api_upstream/Makefile b/sdk-core-protos/protos/api_upstream/Makefile index e92c6e154..3277270f9 100644 --- a/sdk-core-protos/protos/api_upstream/Makefile +++ b/sdk-core-protos/protos/api_upstream/Makefile @@ -15,43 +15,54 @@ GOPATH := $(shell go env GOPATH) endif GOBIN := $(if $(shell go env GOBIN),$(shell go env GOBIN),$(GOPATH)/bin) -SHELL := PATH=$(GOBIN):$(PATH) /bin/sh +PATH := $(GOBIN):$(PATH) +STAMPDIR := .stamp COLOR := "\e[1;36m%s\e[0m\n" +# Only prints output if the exit code is non-zero +define silent_exec + @output=$$($(1) 2>&1); \ + status=$$?; \ + if [ $$status -ne 0 ]; then \ + echo "$$output"; \ + fi; \ + exit $$status +endef + PROTO_ROOT := . -PROTO_FILES = $(shell find $(PROTO_ROOT) -name "*.proto") +PROTO_FILES = $(shell find temporal -name "*.proto") PROTO_DIRS = $(sort $(dir $(PROTO_FILES))) PROTO_OUT := .gen -PROTO_IMPORTS = -I=$(PROTO_ROOT) -I=$(shell go list -modfile build/go.mod -m -f '{{.Dir}}' github.com/temporalio/gogo-protobuf)/protobuf +PROTO_IMPORTS = \ + -I=$(PROTO_ROOT) +PROTO_PATHS = paths=source_relative:$(PROTO_OUT) $(PROTO_OUT): mkdir $(PROTO_OUT) ##### Compile proto files for go ##### -grpc: buf-lint api-linter buf-breaking gogo-grpc fix-path +grpc: buf-lint api-linter buf-breaking clean go-grpc fix-path go-grpc: clean $(PROTO_OUT) printf $(COLOR) "Compile for go-gRPC..." - $(foreach PROTO_DIR,$(PROTO_DIRS),protoc --fatal_warnings $(PROTO_IMPORTS) --go_out=plugins=grpc,paths=source_relative:$(PROTO_OUT) $(PROTO_DIR)*.proto;) - -gogo-grpc: clean $(PROTO_OUT) - printf $(COLOR) "Compile for gogo-gRPC..." 
- $(foreach PROTO_DIR,$(PROTO_DIRS),protoc --fatal_warnings $(PROTO_IMPORTS) --gogoslick_out=Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor,Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,plugins=grpc,paths=source_relative:$(PROTO_OUT) $(PROTO_DIR)*.proto;) + $(foreach PROTO_DIR,$(PROTO_DIRS),\ + protoc --fatal_warnings $(PROTO_IMPORTS) \ + --go_out=$(PROTO_PATHS) \ + --grpc-gateway_out=allow_patch_feature=false,$(PROTO_PATHS)\ + --doc_out=html,index.html,source_relative:$(PROTO_OUT) \ + $(PROTO_DIR)*.proto;) fix-path: mv -f $(PROTO_OUT)/temporal/api/* $(PROTO_OUT) && rm -rf $(PROTO_OUT)/temporal ##### Plugins & tools ##### -grpc-install: gogo-protobuf-install - printf $(COLOR) "Install/update gRPC plugins..." - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest - -gogo-protobuf-install: go-protobuf-install - go install -modfile build/go.mod github.com/temporalio/gogo-protobuf/protoc-gen-gogoslick - -go-protobuf-install: - go install github.com/golang/protobuf/protoc-gen-go@v1.5.2 +grpc-install: + @printf $(COLOR) "Install/update protoc and plugins..." + @go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + @go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + @go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@latest + @go install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc@latest api-linter-install: printf $(COLOR) "Install/update api-linter..." @@ -59,14 +70,22 @@ api-linter-install: buf-install: printf $(COLOR) "Install/update buf..." - go install github.com/bufbuild/buf/cmd/buf@v1.6.0 + go install github.com/bufbuild/buf/cmd/buf@v1.27.0 ##### Linters ##### api-linter: printf $(COLOR) "Run api-linter..." 
- api-linter --set-exit-status $(PROTO_IMPORTS) --config $(PROTO_ROOT)/api-linter.yaml $(PROTO_FILES) + $(call silent_exec, api-linter --set-exit-status $(PROTO_IMPORTS) --config $(PROTO_ROOT)/api-linter.yaml $(PROTO_FILES)) + +$(STAMPDIR): + mkdir $@ + +$(STAMPDIR)/buf-mod-prune: $(STAMPDIR) buf.yaml + printf $(COLOR) "Pruning buf module" + buf mod prune + touch $@ -buf-lint: +buf-lint: $(STAMPDIR)/buf-mod-prune printf $(COLOR) "Run buf linter..." (cd $(PROTO_ROOT) && buf lint) @@ -77,4 +96,4 @@ buf-breaking: ##### Clean ##### clean: printf $(COLOR) "Delete generated go files..." - rm -rf $(PROTO_OUT) + rm -rf $(PROTO_OUT) $(BUF_DEPS) diff --git a/sdk-core-protos/protos/api_upstream/api-linter.yaml b/sdk-core-protos/protos/api_upstream/api-linter.yaml index 0301cc8c0..ff5a4ea57 100644 --- a/sdk-core-protos/protos/api_upstream/api-linter.yaml +++ b/sdk-core-protos/protos/api_upstream/api-linter.yaml @@ -1,40 +1,56 @@ - included_paths: - - '**/*.proto' + - "**/*.proto" disabled_rules: - - 'core::0192::has-comments' + - "core::0192::has-comments" - included_paths: - - '**/message.proto' + - "**/message.proto" disabled_rules: - - 'core::0122::name-suffix' - - 'core::0123::resource-annotation' + - "core::0122::name-suffix" + - "core::0123::resource-annotation" - included_paths: - - '**/workflowservice/v1/request_response.proto' - - '**/operatorservice/v1/request_response.proto' + - "**/workflowservice/v1/request_response.proto" + - "**/operatorservice/v1/request_response.proto" disabled_rules: - - 'core::0122::name-suffix' - - 'core::0131::request-name-required' - - 'core::0131::request-unknown-fields' - - 'core::0132::request-parent-required' - - 'core::0132::request-unknown-fields' - - 'core::0132::response-unknown-fields' - - 'core::0134::request-unknown-fields' - - 'core::0158::request-page-size-field' - - 'core::0158::request-page-token-field' - - 'core::0158::response-next-page-token-field' - - 'core::0158::response-plural-first-field' - - 
'core::0158::response-repeated-first-field' + - "core::0122::name-suffix" + - "core::0131::request-name-required" + - "core::0131::request-unknown-fields" + - "core::0132::request-parent-required" + - "core::0132::request-unknown-fields" + - "core::0132::response-unknown-fields" + - "core::0134::request-unknown-fields" + - "core::0158::request-page-size-field" + - "core::0158::request-page-token-field" + - "core::0158::response-next-page-token-field" + - "core::0158::response-plural-first-field" + - "core::0158::response-repeated-first-field" - included_paths: - - '**/workflowservice/v1/service.proto' - - '**/operatorservice/v1/service.proto' + - "**/workflowservice/v1/service.proto" + - "**/operatorservice/v1/service.proto" disabled_rules: - - 'core::0127::http-annotation' - - 'core::0131::method-signature' - - 'core::0131::response-message-name' + # We extract specific fields in URL since the gRPC API predates the HTTP API + - "core::0127::resource-name-extraction" + + # We do not require specific "Get", "Create", "Update", or "Delete" RPC + # rules just because we happen to use a known RPC name prefix + - "core::0131" + - "core::0133" + - "core::0134" + - "core::0135" + + # We don't require HTTP calls to be suffixed with the same name as the gRPC + # name + - "core::0136::http-uri-suffix" + +- included_paths: + - "**/operatorservice/v1/service.proto" + disabled_rules: + # Do not require HTTP annotations on OperatorService calls at this time + - "core::0127::http-annotation" - included_paths: - - 'dependencies/gogoproto/gogo.proto' + - "google/**/*.proto" disabled_rules: - - 'all' + - "all" diff --git a/sdk-core-protos/protos/api_upstream/buf.gen.yaml b/sdk-core-protos/protos/api_upstream/buf.gen.yaml new file mode 100644 index 000000000..555cd008d --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/buf.gen.yaml @@ -0,0 +1,20 @@ +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go:v1.31.0 + out: ./ + opt: + - paths=source_relative + - plugin: 
buf.build/grpc/go:v1.3.0 + out: ./ + opt: + - paths=source_relative + - plugin: buf.build/grpc-ecosystem/gateway:v2.18.0 + out: ./ + opt: + - paths=source_relative + - allow_patch_feature=false + - name: go-helpers + out: ./ + path: ["go", "run", "./protoc-gen-go-helpers"] + opt: + - paths=source_relative diff --git a/sdk-core-protos/protos/api_upstream/buf.lock b/sdk-core-protos/protos/api_upstream/buf.lock new file mode 100644 index 000000000..81047e195 --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/buf.lock @@ -0,0 +1,11 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: googleapis + repository: googleapis + commit: 28151c0d0a1641bf938a7672c500e01d + - remote: buf.build + owner: grpc-ecosystem + repository: grpc-gateway + commit: 048ae6ff94ca4476b3225904b1078fad diff --git a/sdk-core-protos/protos/api_upstream/buf.yaml b/sdk-core-protos/protos/api_upstream/buf.yaml index 68d6bb9fd..499b2b652 100644 --- a/sdk-core-protos/protos/api_upstream/buf.yaml +++ b/sdk-core-protos/protos/api_upstream/buf.yaml @@ -1,9 +1,18 @@ version: v1 +deps: + - buf.build/grpc-ecosystem/grpc-gateway +build: + excludes: + # Buf won't accept a local dependency on the google protos but we need them + # to run api-linter, so just tell buf it ignore it + - google breaking: use: - WIRE_JSON + ignore: + - google lint: use: - DEFAULT ignore: - - dependencies + - google diff --git a/sdk-core-protos/protos/api_upstream/build/go.mod b/sdk-core-protos/protos/api_upstream/build/go.mod deleted file mode 100644 index 32cb63c63..000000000 --- a/sdk-core-protos/protos/api_upstream/build/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module build - -go 1.18 - -require ( - github.com/temporalio/gogo-protobuf v1.22.1 -) \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/build/go.sum b/sdk-core-protos/protos/api_upstream/build/go.sum deleted file mode 100644 index 15db273c7..000000000 --- a/sdk-core-protos/protos/api_upstream/build/go.sum +++ 
/dev/null @@ -1,5 +0,0 @@ -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/temporalio/gogo-protobuf v1.22.1 h1:K5ja5MqmCQKo4tlX7u3g+ZJqbvRr0589ss2cZQx2dSM= -github.com/temporalio/gogo-protobuf v1.22.1/go.mod h1:tCaEv+fB8tsyLgoaqKr78K/JOhdRe684yyo0z30SHyA= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/sdk-core-protos/protos/api_upstream/dependencies/gogoproto/gogo.proto b/sdk-core-protos/protos/api_upstream/dependencies/gogoproto/gogo.proto deleted file mode 100644 index 49837cc64..000000000 --- a/sdk-core-protos/protos/api_upstream/dependencies/gogoproto/gogo.proto +++ /dev/null @@ -1,141 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/temporalio/gogo-protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all 
= 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; -} diff --git 
a/sdk-core-protos/protos/api_upstream/go.mod b/sdk-core-protos/protos/api_upstream/go.mod deleted file mode 100644 index 2d11a0dba..000000000 --- a/sdk-core-protos/protos/api_upstream/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -// There is special go module in `build` directory that is used to control tools versions. -// This file exists because go 1.18 doesn't allow go sub modules if root dirrectory is not a go module. - -module api - -go 1.18 diff --git a/sdk-core-protos/protos/api_upstream/google/api/annotations.proto b/sdk-core-protos/protos/api_upstream/google/api/annotations.proto new file mode 100644 index 000000000..efdab3db6 --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. 
+ HttpRule http = 72295728; +} diff --git a/sdk-core-protos/protos/api_upstream/google/api/http.proto b/sdk-core-protos/protos/api_upstream/google/api/http.proto new file mode 100644 index 000000000..31d867a27 --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/api/http.proto @@ -0,0 +1,379 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. 
+ bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. 
+// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. 
The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. 
You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. 
However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. 
Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/sdk-core-protos/protos/api_upstream/google/protobuf/any.proto b/sdk-core-protos/protos/api_upstream/google/protobuf/any.proto new file mode 100644 index 000000000..ad8a3b5bd --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/protobuf/any.proto @@ -0,0 +1,162 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/known/anypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... 
+// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. 
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/google/protobuf/descriptor.proto b/sdk-core-protos/protos/api_upstream/google/protobuf/descriptor.proto new file mode 100644 index 000000000..4bc6ce04b --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/protobuf/descriptor.proto @@ -0,0 +1,1212 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +syntax = "proto2"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/descriptorpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// The full set of known editions. +enum Edition { + // A placeholder for an unknown edition value. + EDITION_UNKNOWN = 0; + + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + EDITION_PROTO2 = 998; + EDITION_PROTO3 = 999; + + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + EDITION_2023 = 1000; + + // Placeholder editions for testing feature resolution. These should not be + // used or relyed on outside of tests. + EDITION_1_TEST_ONLY = 1; + EDITION_2_TEST_ONLY = 2; + EDITION_99997_TEST_ONLY = 99997; + EDITION_99998_TEST_ONLY = 99998; + EDITION_99999_TEST_ONLY = 99999; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. 
+ // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". + optional string syntax = 12; + + // The edition of the proto file. + optional Edition edition = 14; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + message Declaration { + // The extension number declared within the extension range. + optional int32 number = 1; + + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. 
+ optional string full_name = 2; + + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + optional string type = 3; + + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + optional bool reserved = 5; + + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. + optional bool repeated = 6; + + reserved 4; // removed is_repeated + } + + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. + repeated Declaration declaration = 2 [retention = RETENTION_SOURCE]; + + // Any features defined in the specific edition. + optional FeatureSet features = 50; + + // The verification state of the extension range. + enum VerificationState { + // All the extensions of the range must be declared. + DECLARATION = 0; + UNVERIFIED = 1; + } + + // The verification state of the range. + // TODO: flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + optional VerificationState verification = 3 [default = UNVERIFIED]; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. 
+ TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. 
The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; + + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + optional bool proto3_optional = 17; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. 
Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. 
These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. 
By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. + optional string java_outer_classname = 8; + + // If enabled, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. 
+ } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = true]; + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + // Any features defined in the specific edition. + optional FeatureSet features = 50; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. 
You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + reserved 4, 5, 6; + + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. 
+ // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + optional bool deprecated_legacy_json_field_conflicts = 11 [deprecated = true]; + + // Any features defined in the specific edition. + optional FeatureSet features = 12; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. 
It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. 
However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. + optional bool lazy = 5 [default = false]; + + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. + optional bool unverified_lazy = 15 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. 
+ optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + optional bool debug_redact = 16 [default = false]; + + // If set to RETENTION_SOURCE, the option will be omitted from the binary. + // Note: as of January 2023, support for this is in progress and does not yet + // have an effect (b/264593489). + enum OptionRetention { + RETENTION_UNKNOWN = 0; + RETENTION_RUNTIME = 1; + RETENTION_SOURCE = 2; + } + + optional OptionRetention retention = 17; + + // This indicates the types of entities that the field may apply to when used + // as an option. If it is unset, then the field may be freely used as an + // option on any kind of entity. Note: as of January 2023, support for this is + // in progress and does not yet have an effect (b/264593489). + enum OptionTargetType { + TARGET_TYPE_UNKNOWN = 0; + TARGET_TYPE_FILE = 1; + TARGET_TYPE_EXTENSION_RANGE = 2; + TARGET_TYPE_MESSAGE = 3; + TARGET_TYPE_FIELD = 4; + TARGET_TYPE_ONEOF = 5; + TARGET_TYPE_ENUM = 6; + TARGET_TYPE_ENUM_ENTRY = 7; + TARGET_TYPE_SERVICE = 8; + TARGET_TYPE_METHOD = 9; + } + + repeated OptionTargetType targets = 19; + + message EditionDefault { + optional Edition edition = 3; + optional string value = 2; // Textproto value. + } + repeated EditionDefault edition_defaults = 20; + + // Any features defined in the specific edition. + optional FeatureSet features = 21; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; + + reserved 4; // removed jtype + reserved 18; // reserve target, target_obsolete_do_not_use +} + +message OneofOptions { + // Any features defined in the specific edition. + optional FeatureSet features = 1; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO Remove this legacy behavior once downstream teams have + // had time to migrate. + optional bool deprecated_legacy_json_field_conflicts = 6 [deprecated = true]; + + // Any features defined in the specific edition. + optional FeatureSet features = 7; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. 
+ optional bool deprecated = 1 [default = false]; + + // Any features defined in the specific edition. + optional FeatureSet features = 2; + + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + optional bool debug_redact = 3 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Any features defined in the specific edition. + optional FeatureSet features = 34; + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // Any features defined in the specific edition. + optional FeatureSet features = 35; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents + // "foo.(bar.baz).moo". 
+ message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Features + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. +message FeatureSet { + enum FieldPresence { + FIELD_PRESENCE_UNKNOWN = 0; + EXPLICIT = 1; + IMPLICIT = 2; + LEGACY_REQUIRED = 3; + } + optional FieldPresence field_presence = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "EXPLICIT" }, + edition_defaults = { edition: EDITION_PROTO3, value: "IMPLICIT" }, + edition_defaults = { edition: EDITION_2023, value: "EXPLICIT" } + ]; + + enum EnumType { + ENUM_TYPE_UNKNOWN = 0; + OPEN = 1; + CLOSED = 2; + } + optional EnumType enum_type = 2 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "CLOSED" }, + edition_defaults = { edition: EDITION_PROTO3, value: "OPEN" } + ]; + + enum RepeatedFieldEncoding { + REPEATED_FIELD_ENCODING_UNKNOWN = 0; + PACKED = 1; + EXPANDED = 2; + } + optional RepeatedFieldEncoding repeated_field_encoding = 3 [ + retention = 
RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "EXPANDED" }, + edition_defaults = { edition: EDITION_PROTO3, value: "PACKED" } + ]; + + enum Utf8Validation { + UTF8_VALIDATION_UNKNOWN = 0; + UNVERIFIED = 1; + VERIFY = 2; + } + optional Utf8Validation utf8_validation = 4 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "UNVERIFIED" }, + edition_defaults = { edition: EDITION_PROTO3, value: "VERIFY" } + ]; + + enum MessageEncoding { + MESSAGE_ENCODING_UNKNOWN = 0; + LENGTH_PREFIXED = 1; + DELIMITED = 2; + } + optional MessageEncoding message_encoding = 5 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "LENGTH_PREFIXED" } + ]; + + enum JsonFormat { + JSON_FORMAT_UNKNOWN = 0; + ALLOW = 1; + LEGACY_BEST_EFFORT = 2; + } + optional JsonFormat json_format = 6 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_MESSAGE, + targets = TARGET_TYPE_ENUM, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "LEGACY_BEST_EFFORT" }, + edition_defaults = { edition: EDITION_PROTO3, value: "ALLOW" } + ]; + + reserved 999; + + extensions 1000; // for Protobuf C++ + extensions 1001; // for Protobuf Java + + extensions 9995 to 9999; // For internal testing +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +message FeatureSetDefaults { + // A map from every known edition with a unique set of defaults to its + // defaults. Not all editions may be contained here. 
For a given edition, + // the defaults at the closest matching edition ordered at or before it should + // be used. This field must be in strict ascending order by edition. + message FeatureSetEditionDefault { + optional Edition edition = 3; + optional FeatureSet features = 2; + } + repeated FeatureSetEditionDefault defaults = 1; + + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + optional Edition minimum_edition = 4; + + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + optional Edition maximum_edition = 5; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. 
For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition occurs. 
+ // For example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. 
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified object. 
The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + + // Represents the identified object's effect on the element in the original + // .proto file. + enum Semantic { + // There is no effect or the effect is indescribable. + NONE = 0; + // The element is set or otherwise mutated. + SET = 1; + // An alias to the element is returned. + ALIAS = 2; + } + optional Semantic semantic = 5; + } +} \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/google/protobuf/duration.proto b/sdk-core-protos/protos/api_upstream/google/protobuf/duration.proto new file mode 100644 index 000000000..a49438b9e --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/protobuf/duration.proto @@ -0,0 +1,115 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/durationpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ int32 nanos = 2; +} \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/google/protobuf/empty.proto b/sdk-core-protos/protos/api_upstream/google/protobuf/empty.proto new file mode 100644 index 000000000..b87c89dcf --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/protobuf/empty.proto @@ -0,0 +1,51 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/known/emptypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +message Empty {} diff --git a/sdk-core-protos/protos/api_upstream/google/protobuf/timestamp.proto b/sdk-core-protos/protos/api_upstream/google/protobuf/timestamp.proto new file mode 100644 index 000000000..d0698db68 --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/protobuf/timestamp.proto @@ -0,0 +1,144 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/timestamppb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +// ) to obtain a formatter capable of generating timestamps in this format. +// +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/google/protobuf/wrappers.proto b/sdk-core-protos/protos/api_upstream/google/protobuf/wrappers.proto new file mode 100644 index 000000000..6c4b5ac6a --- /dev/null +++ b/sdk-core-protos/protos/api_upstream/google/protobuf/wrappers.proto @@ -0,0 +1,123 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. 
+message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. 
+ bytes value = 1; +} \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/batch/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/batch/v1/message.proto index 57ff1c8f3..f11f1c432 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/batch/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/batch/v1/message.proto @@ -31,10 +31,8 @@ option java_outer_classname = "MessageProto"; option ruby_package = "Temporalio::Api::Batch::V1"; option csharp_namespace = "Temporalio.Api.Batch.V1"; -import "dependencies/gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; - import "temporal/api/common/v1/message.proto"; import "temporal/api/enums/v1/batch_operation.proto"; import "temporal/api/enums/v1/reset.proto"; @@ -45,9 +43,9 @@ message BatchOperationInfo { // Batch operation state temporal.api.enums.v1.BatchOperationState state = 2; // Batch operation start time - google.protobuf.Timestamp start_time = 3 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp start_time = 3; // Batch operation close time - google.protobuf.Timestamp close_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp close_time = 4; } // BatchOperationTermination sends terminate requests to batch workflows. @@ -98,4 +96,4 @@ message BatchOperationReset { temporal.api.enums.v1.ResetReapplyType reset_reapply_type = 2; // The identity of the worker/client. 
string identity = 3; -} \ No newline at end of file +} diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/command/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/command/v1/message.proto index 8a2b2b6a9..8ffcf83d2 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/command/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/command/v1/message.proto @@ -33,8 +33,6 @@ option csharp_namespace = "Temporalio.Api.Command.V1"; import "google/protobuf/duration.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/workflow.proto"; import "temporal/api/enums/v1/command_type.proto"; import "temporal/api/common/v1/message.proto"; @@ -56,7 +54,7 @@ message ScheduleActivityTaskCommandAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration schedule_to_close_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration schedule_to_close_timeout = 7; // Limits the time an activity task can stay in a task queue before a worker picks it up. The // "schedule" time is when the most recent retry is scheduled. This timeout should usually not // be set: it's useful in specific scenarios like worker-specific task queues. This timeout is @@ -67,15 +65,15 @@ message ScheduleActivityTaskCommandAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration schedule_to_start_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration schedule_to_start_timeout = 8; // Maximum time an activity is allowed to execute after being picked up by a worker. This // timeout is always retryable. Either this or `schedule_to_close_timeout` must be specified. // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - google.protobuf.Duration start_to_close_timeout = 9 [(gogoproto.stdduration) = true]; + google.protobuf.Duration start_to_close_timeout = 9; // Maximum permitted time between successful worker heartbeats. - google.protobuf.Duration heartbeat_timeout = 10 [(gogoproto.stdduration) = true]; + google.protobuf.Duration heartbeat_timeout = 10; // Activities are provided by a default retry policy which is controlled through the service's // dynamic configuration. Retries will be attempted until `schedule_to_close_timeout` has // elapsed. To disable retries set retry_policy.maximum_attempts to 1. @@ -102,7 +100,7 @@ message StartTimerCommandAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration start_to_fire_timeout = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration start_to_fire_timeout = 2; } message CompleteWorkflowExecutionCommandAttributes { @@ -178,11 +176,11 @@ message ContinueAsNewWorkflowExecutionCommandAttributes { temporal.api.common.v1.Payloads input = 3; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 4 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 4; // Timeout of a single workflow task. - google.protobuf.Duration workflow_task_timeout = 5 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 5; // How long the workflow start will be delayed - not really a "backoff" in the traditional sense. 
- google.protobuf.Duration backoff_start_interval = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration backoff_start_interval = 6; temporal.api.common.v1.RetryPolicy retry_policy = 7; // Should be removed temporal.api.enums.v1.ContinueAsNewInitiator initiator = 8; @@ -209,11 +207,11 @@ message StartChildWorkflowExecutionCommandAttributes { temporal.api.taskqueue.v1.TaskQueue task_queue = 4; temporal.api.common.v1.Payloads input = 5; // Total workflow execution timeout including retries and continue as new. - google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 6; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 7; // Timeout of a single workflow task. - google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 8; // Default: PARENT_CLOSE_POLICY_TERMINATE. temporal.api.enums.v1.ParentClosePolicy parent_close_policy = 9; string control = 10; diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/common/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/common/v1/message.proto index a66e4d6ab..60395287d 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/common/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/common/v1/message.proto @@ -33,8 +33,6 @@ option csharp_namespace = "Temporalio.Api.Common.V1"; import "google/protobuf/duration.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/common.proto"; message DataBlob { @@ -95,14 +93,14 @@ message ActivityType { // How retries ought to be handled, usable by both workflows and activities message RetryPolicy { // Interval of the first retry. If retryBackoffCoefficient is 1.0 then it is used for all retries. 
- google.protobuf.Duration initial_interval = 1 [(gogoproto.stdduration) = true]; + google.protobuf.Duration initial_interval = 1; // Coefficient used to calculate the next retry interval. // The next retry interval is previous interval multiplied by the coefficient. // Must be 1 or larger. double backoff_coefficient = 2; // Maximum interval between retries. Exponential backoff leads to interval increase. // This value is the cap of the increase. Default is 100x of the initial interval. - google.protobuf.Duration maximum_interval = 3 [(gogoproto.stdduration) = true]; + google.protobuf.Duration maximum_interval = 3; // Maximum number of attempts. When exceeded the retries stop even if not expired yet. // 1 disables retries. 0 means unlimited (up to the timeouts) int32 maximum_attempts = 4; diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto b/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto index a2fbe5f6d..a13684c70 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/failed_cause.proto @@ -124,4 +124,6 @@ enum ResourceExhaustedCause { RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_LIMIT = 4; // Workflow is busy RESOURCE_EXHAUSTED_CAUSE_BUSY_WORKFLOW = 5; + // Caller exceeds action per second limit. 
+ RESOURCE_EXHAUSTED_CAUSE_APS_LIMIT = 6; } diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto b/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto index 02263e2a7..3cb9b3058 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/enums/v1/reset.proto @@ -31,7 +31,7 @@ option java_outer_classname = "ResetProto"; option ruby_package = "Temporalio::Api::Enums::V1"; option csharp_namespace = "Temporalio.Api.Enums.V1"; -// Reset reapplay(replay) options +// Reset reapply (replay) options // * RESET_REAPPLY_TYPE_SIGNAL (default) - Signals are reapplied when workflow is reset // * RESET_REAPPLY_TYPE_NONE - nothing is reapplied enum ResetReapplyType { diff --git a/sdk-core-protos/protos/api_upstream/build/tools.go b/sdk-core-protos/protos/api_upstream/temporal/api/export/v1/message.proto similarity index 62% rename from sdk-core-protos/protos/api_upstream/build/tools.go rename to sdk-core-protos/protos/api_upstream/temporal/api/export/v1/message.proto index e7f3e28e8..7ffd23305 100644 --- a/sdk-core-protos/protos/api_upstream/build/tools.go +++ b/sdk-core-protos/protos/api_upstream/temporal/api/export/v1/message.proto @@ -2,8 +2,6 @@ // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // -// Copyright (c) 2020 Uber Technologies, Inc. -// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights @@ -22,8 +20,26 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package build +syntax = "proto3"; + +package temporal.api.export.v1; + +option go_package = "go.temporal.io/api/export/v1;workflow"; +option java_package = "io.temporal.api.export.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Export::V1"; +option csharp_namespace = "Temporalio.Api.Export.V1"; + +import "temporal/api/history/v1/message.proto"; + +message WorkflowExecution { + temporal.api.history.v1.History history = 1; +} + +// WorkflowExecutions is used by the Cloud Export feature to deserialize +// the exported file. It encapsulates a collection of workflow execution information. +message WorkflowExecutions { + repeated WorkflowExecution items = 1; +} -import ( - _ "github.com/temporalio/gogo-protobuf/gogoproto" // gogoproto is just a random package name for module. -) diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/filter/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/filter/v1/message.proto index de0af8397..94d0f080d 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/filter/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/filter/v1/message.proto @@ -33,8 +33,6 @@ option csharp_namespace = "Temporalio.Api.Filter.V1"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/workflow.proto"; message WorkflowExecutionFilter { @@ -47,8 +45,8 @@ message WorkflowTypeFilter { } message StartTimeFilter { - google.protobuf.Timestamp earliest_time = 1 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp latest_time = 2 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp earliest_time = 1; + google.protobuf.Timestamp latest_time = 2; } message StatusFilter { diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/history/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/history/v1/message.proto index dc38ffefa..5fc67710a 100644 
--- a/sdk-core-protos/protos/api_upstream/temporal/api/history/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/history/v1/message.proto @@ -34,8 +34,6 @@ option csharp_namespace = "Temporalio.Api.History.V1"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/event_type.proto"; import "temporal/api/enums/v1/failed_cause.proto"; import "temporal/api/enums/v1/workflow.proto"; @@ -62,11 +60,11 @@ message WorkflowExecutionStartedEventAttributes { // SDK will deserialize this and provide it as arguments to the workflow function temporal.api.common.v1.Payloads input = 6; // Total workflow execution timeout including retries and continue as new. - google.protobuf.Duration workflow_execution_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 7; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 8; // Timeout of a single workflow task. - google.protobuf.Duration workflow_task_timeout = 9 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 9; // Run id of the previous workflow which continued-as-new or retired or cron executed into this // workflow. string continued_execution_run_id = 10; @@ -86,12 +84,12 @@ message WorkflowExecutionStartedEventAttributes { int32 attempt = 18; // The absolute time at which the workflow will be timed out. // This is passed without change to the next run/retry of a workflow. 
- google.protobuf.Timestamp workflow_execution_expiration_time = 19 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp workflow_execution_expiration_time = 19; // If this workflow runs on a cron schedule, it will appear here string cron_schedule = 20; // For a cron workflow, this contains the amount of time between when this iteration of // the cron workflow was scheduled and when it should run next per its cron_schedule. - google.protobuf.Duration first_workflow_task_backoff = 21 [(gogoproto.stdduration) = true]; + google.protobuf.Duration first_workflow_task_backoff = 21; temporal.api.common.v1.Memo memo = 22; temporal.api.common.v1.SearchAttributes search_attributes = 23; temporal.api.workflow.v1.ResetPoints prev_auto_reset_points = 24; @@ -139,13 +137,13 @@ message WorkflowExecutionContinuedAsNewEventAttributes { temporal.api.taskqueue.v1.TaskQueue task_queue = 3; temporal.api.common.v1.Payloads input = 4; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 5 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 5; // Timeout of a single workflow task. - google.protobuf.Duration workflow_task_timeout = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 6; // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with int64 workflow_task_completed_event_id = 7; // TODO: How and is this used? - google.protobuf.Duration backoff_start_interval = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration backoff_start_interval = 8; temporal.api.enums.v1.ContinueAsNewInitiator initiator = 9; // TODO: David are these right? // Deprecated. If a workflow's retry policy would cause a new run to start when the current one @@ -171,7 +169,7 @@ message WorkflowTaskScheduledEventAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - google.protobuf.Duration start_to_close_timeout = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration start_to_close_timeout = 2; // Starting at 1, how many attempts there have been to complete this task int32 attempt = 3; } @@ -260,7 +258,7 @@ message ActivityTaskScheduledEventAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration schedule_to_close_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration schedule_to_close_timeout = 7; // Limits time an activity task can stay in a task queue before a worker picks it up. This // timeout is always non retryable, as all a retry would achieve is to put it back into the same // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not @@ -268,16 +266,16 @@ message ActivityTaskScheduledEventAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration schedule_to_start_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration schedule_to_start_timeout = 8; // Maximum time an activity is allowed to execute after being picked up by a worker. This // timeout is always retryable. Either this or `schedule_to_close_timeout` must be // specified. // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration start_to_close_timeout = 9 [(gogoproto.stdduration) = true]; + google.protobuf.Duration start_to_close_timeout = 9; // Maximum permitted time between successful worker heartbeats. 
- google.protobuf.Duration heartbeat_timeout = 10 [(gogoproto.stdduration) = true]; + google.protobuf.Duration heartbeat_timeout = 10; // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with int64 workflow_task_completed_event_id = 11; // Activities are assigned a default retry policy controlled by the service's dynamic @@ -372,7 +370,7 @@ message TimerStartedEventAttributes { // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration start_to_fire_timeout = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration start_to_fire_timeout = 2; // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with int64 workflow_task_completed_event_id = 3; } @@ -561,11 +559,11 @@ message StartChildWorkflowExecutionInitiatedEventAttributes { temporal.api.taskqueue.v1.TaskQueue task_queue = 4; temporal.api.common.v1.Payloads input = 5; // Total workflow execution timeout including retries and continue as new. - google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 6; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 7; // Timeout of a single workflow task. - google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 8; // Default: PARENT_CLOSE_POLICY_TERMINATE. temporal.api.enums.v1.ParentClosePolicy parent_close_policy = 9; // Deprecated @@ -689,11 +687,11 @@ message WorkflowPropertiesModifiedExternallyEventAttributes { // the provided queue. string new_task_queue = 1; // If set, update the workflow task timeout to this value. 
- google.protobuf.Duration new_workflow_task_timeout = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration new_workflow_task_timeout = 2; // If set, update the workflow run timeout to this value. May be set to 0 for no timeout. - google.protobuf.Duration new_workflow_run_timeout = 3 [(gogoproto.stdduration) = true]; + google.protobuf.Duration new_workflow_run_timeout = 3; // If set, update the workflow execution timeout to this value. May be set to 0 for no timeout. - google.protobuf.Duration new_workflow_execution_timeout = 4 [(gogoproto.stdduration) = true]; + google.protobuf.Duration new_workflow_execution_timeout = 4; // If set, update the workflow memo with the provided values. The values will be merged with // the existing memo. If the user wants to delete values, a default/empty Payload should be // used as the value for the key being deleted. @@ -755,7 +753,7 @@ message WorkflowExecutionUpdateRejectedEventAttributes { message HistoryEvent { // Monotonically increasing event number, starts at 1. int64 event_id = 1; - google.protobuf.Timestamp event_time = 2 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp event_time = 2; temporal.api.enums.v1.EventType event_type = 3; // TODO: What is this? 
Appears unused by SDKs int64 version = 4; diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/namespace/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/namespace/v1/message.proto index 009b48019..7b5a6c9ea 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/namespace/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/namespace/v1/message.proto @@ -34,8 +34,6 @@ option csharp_namespace = "Temporalio.Api.Namespace.V1"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/namespace.proto"; @@ -54,7 +52,7 @@ message NamespaceInfo { } message NamespaceConfig { - google.protobuf.Duration workflow_execution_retention_ttl = 1 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_retention_ttl = 1; BadBinaries bad_binaries = 2; // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. temporal.api.enums.v1.ArchivalState history_archival_state = 3; @@ -73,7 +71,7 @@ message BadBinaries { message BadBinaryInfo { string reason = 1; string operator = 2; - google.protobuf.Timestamp create_time = 3 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp create_time = 3; } message UpdateNamespaceInfo { diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto b/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto index de9e5e259..abe70f740 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/request_response.proto @@ -71,7 +71,9 @@ message ListSearchAttributesResponse { // (-- api-linter: core::0135::request-name-required=disabled // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. 
--) message DeleteNamespaceRequest { + // Only one of namespace or namespace_id must be specified to identify namespace. string namespace = 1; + string namespace_id = 2; } message DeleteNamespaceResponse { diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/service.proto b/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/service.proto index 19630c7e8..0dfd3c412 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/service.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/operatorservice/v1/service.proto @@ -33,6 +33,7 @@ option csharp_namespace = "Temporalio.Api.OperatorService.V1"; import "temporal/api/operatorservice/v1/request_response.proto"; +import "google/api/annotations.proto"; // OperatorService API defines how Temporal SDKs and other clients interact with the Temporal server // to perform administrative functions like registering a search attribute or a namespace. @@ -56,6 +57,9 @@ service OperatorService { // ListSearchAttributes returns comprehensive information about search attributes. rpc ListSearchAttributes (ListSearchAttributesRequest) returns (ListSearchAttributesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/search-attributes", + }; } // DeleteNamespace synchronously deletes a namespace and asynchronously reclaims all namespace resources. 
diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/replication/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/replication/v1/message.proto index 3da7fbcd6..0ac68af69 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/replication/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/replication/v1/message.proto @@ -33,8 +33,6 @@ option csharp_namespace = "Temporalio.Api.Replication.V1"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/namespace.proto"; message ClusterReplicationConfig { @@ -50,6 +48,6 @@ message NamespaceReplicationConfig { // Represents a historical replication status of a Namespace message FailoverStatus { // Timestamp when the Cluster switched to the following failover_version - google.protobuf.Timestamp failover_time = 1 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp failover_time = 1; int64 failover_version = 2; } diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/schedule/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/schedule/v1/message.proto index 737f97d98..8867d23af 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/schedule/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/schedule/v1/message.proto @@ -39,8 +39,6 @@ option csharp_namespace = "Temporalio.Api.Schedule.V1"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/common/v1/message.proto"; import "temporal/api/enums/v1/schedule.proto"; import "temporal/api/workflow/v1/message.proto"; @@ -139,8 +137,8 @@ message StructuredCalendarSpec { // 2022-02-17T00:00:00Z (among other times). The same interval with a phase of 3 // days, 5 hours, and 23 minutes would match 2022-02-20T05:23:00Z instead. 
message IntervalSpec { - google.protobuf.Duration interval = 1 [(gogoproto.stdduration) = true]; - google.protobuf.Duration phase = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration interval = 1; + google.protobuf.Duration phase = 2; } // ScheduleSpec is a complete description of a set of absolute timestamps @@ -158,6 +156,9 @@ message IntervalSpec { // On input, calendar and cron_string fields will be compiled into // structured_calendar (and maybe interval and timezone_name), so if you // Describe a schedule, you'll see only structured_calendar, interval, etc. +// +// If a spec has no matching times after the current time, then the schedule +// will be subject to automatic deletion (after several days). message ScheduleSpec { // Calendar-based specifications of times. repeated StructuredCalendarSpec structured_calendar = 7; @@ -191,12 +192,12 @@ message ScheduleSpec { repeated StructuredCalendarSpec exclude_structured_calendar = 9; // If start_time is set, any timestamps before start_time will be skipped. // (Together, start_time and end_time make an inclusive interval.) - google.protobuf.Timestamp start_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp start_time = 4; // If end_time is set, any timestamps after end_time will be skipped. - google.protobuf.Timestamp end_time = 5 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp end_time = 5; // All timestamps will be incremented by a random value from 0 to this // amount of jitter. Default: 0 - google.protobuf.Duration jitter = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration jitter = 6; // Time zone to interpret all calendar-based specs in. // @@ -235,8 +236,8 @@ message SchedulePolicies { // If the Temporal server misses an action due to one or more components // being down, and comes back up, the action will be run if the scheduled // time is within this window from the current time. - // This value defaults to 60 seconds, and can't be less than 10 seconds. 
- google.protobuf.Duration catchup_window = 2 [(gogoproto.stdduration) = true]; + // This value defaults to one year, and can't be less than 10 seconds. + google.protobuf.Duration catchup_window = 2; // If true, and a workflow run fails or times out, turn on "paused". // This applies after retry policies: the full chain of retries must fail to @@ -257,10 +258,10 @@ message ScheduleAction { message ScheduleActionResult { // Time that the action was taken (according to the schedule, including jitter). - google.protobuf.Timestamp schedule_time = 1 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp schedule_time = 1; // Time that the action was taken (real time). - google.protobuf.Timestamp actual_time = 2 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp actual_time = 2; // If action was start_workflow: temporal.api.common.v1.WorkflowExecution start_workflow_result = 11; @@ -280,20 +281,27 @@ message ScheduleState { // is zero. Actions may still be taken by explicit request (i.e. trigger // immediately or backfill). Skipped actions (due to overlap policy) do not // count against remaining actions. + // If a schedule has no more remaining actions, then the schedule will be + // subject to automatic deletion (after several days). bool limited_actions = 3; int64 remaining_actions = 4; } message TriggerImmediatelyRequest { - // Override overlap policy for this one request. + // If set, override overlap policy for this one request. temporal.api.enums.v1.ScheduleOverlapPolicy overlap_policy = 1; } message BackfillRequest { - // Time range to evaluate schedule in. - google.protobuf.Timestamp start_time = 1 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp end_time = 2 [(gogoproto.stdtime) = true]; - // Override overlap policy for this request. + // Time range to evaluate schedule in. Currently, this time range is + // exclusive on start_time and inclusive on end_time. 
(This is admittedly + // counterintuitive and it may change in the future, so to be safe, use a + // start time strictly before a scheduled time.) Also note that an action + // nominally scheduled in the interval but with jitter that pushes it after + // end_time will not be included. + google.protobuf.Timestamp start_time = 1; + google.protobuf.Timestamp end_time = 2; + // If set, override overlap policy for this request. temporal.api.enums.v1.ScheduleOverlapPolicy overlap_policy = 3; } @@ -322,6 +330,14 @@ message ScheduleInfo { // Number of skipped actions due to overlap. int64 overlap_skipped = 3; + // Number of dropped actions due to buffer limit. + int64 buffer_dropped = 10; + + // Number of actions in the buffer. The buffer holds the actions that cannot + // be immediately triggered (due to the overlap policy). These actions can be a result of + // the normal schedule or a backfill. + int64 buffer_size = 11; + // Currently-running workflows started by this schedule. (There might be // more than one if the overlap policy allows overlaps.) // Note that the run_ids in here are the original execution run ids as @@ -333,11 +349,11 @@ message ScheduleInfo { repeated ScheduleActionResult recent_actions = 4; // Next ten scheduled action times. - repeated google.protobuf.Timestamp future_action_times = 5 [(gogoproto.stdtime) = true]; + repeated google.protobuf.Timestamp future_action_times = 5; // Timestamps of schedule creation and last update. 
- google.protobuf.Timestamp create_time = 6 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp update_time = 7 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp create_time = 6; + google.protobuf.Timestamp update_time = 7; string invalid_schedule_error = 8 [deprecated = true]; } @@ -367,7 +383,7 @@ message ScheduleListInfo { // From info (maybe fewer entries): repeated ScheduleActionResult recent_actions = 5; - repeated google.protobuf.Timestamp future_action_times = 6 [(gogoproto.stdtime) = true]; + repeated google.protobuf.Timestamp future_action_times = 6; } // ScheduleListEntry is returned by ListSchedules. diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto b/sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto index eaad976af..8be7b8946 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/sdk/v1/task_complete_metadata.proto @@ -60,4 +60,17 @@ message WorkflowTaskCompletedMetadata { // (-- api-linter: core::0141::forbidden-types=disabled // aip.dev/not-precedent: These really shouldn't have negative values. --) repeated uint32 lang_used_flags = 2; + + // Name of the SDK that processed the task. This is usually something like "temporal-go" and is + // usually the same as client-name gRPC header. This should only be set if its value changed + // since the last time recorded on the workflow (or be set on the first task). + // + // (-- api-linter: core::0122::name-suffix=disabled + // aip.dev/not-precedent: We're ok with a name suffix here. --) + string sdk_name = 3; + + // Version of the SDK that processed the task. This is usually something like "1.20.0" and is + // usually the same as client-version gRPC header. This should only be set if its value changed + // since the last time recorded on the workflow (or be set on the first task). 
+ string sdk_version = 4; } \ No newline at end of file diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/taskqueue/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/taskqueue/v1/message.proto index e8f027d12..7e6359ec1 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/taskqueue/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/taskqueue/v1/message.proto @@ -35,8 +35,6 @@ import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/task_queue.proto"; import "temporal/api/common/v1/message.proto"; @@ -75,7 +73,7 @@ message TaskQueuePartitionMetadata { } message PollerInfo { - google.protobuf.Timestamp last_access_time = 1 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp last_access_time = 1; string identity = 2; double rate_per_second = 3; // If a worker has opted into the worker versioning feature while polling, its capabilities will @@ -87,7 +85,7 @@ message StickyExecutionAttributes { TaskQueue worker_task_queue = 1; // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - google.protobuf.Duration schedule_to_start_timeout = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration schedule_to_start_timeout = 2; } // Used by the worker versioning APIs, represents an unordered set of one or more versions which are diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/update/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/update/v1/message.proto index b6d82b5b0..db6f9cefa 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/update/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/update/v1/message.proto @@ -44,7 +44,7 @@ message WaitPolicy { temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage lifecycle_stage = 1; } -// The data needed by a client to refer to an previously invoked workflow +// The data needed by a client to refer to a previously invoked workflow // execution update process. message UpdateRef { temporal.api.common.v1.WorkflowExecution workflow_execution = 1; diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/version/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/version/v1/message.proto index 7c2d36097..cec0fe62d 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/version/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/version/v1/message.proto @@ -32,13 +32,12 @@ option ruby_package = "Temporalio::Api::Version::V1"; option csharp_namespace = "Temporalio.Api.Version.V1"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; import "temporal/api/enums/v1/common.proto"; // ReleaseInfo contains information about specific version of temporal. 
message ReleaseInfo { string version = 1; - google.protobuf.Timestamp release_time = 2 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp release_time = 2; string notes = 3; } @@ -54,6 +53,6 @@ message VersionInfo { ReleaseInfo recommended = 2; string instructions = 3; repeated Alert alerts = 4; - google.protobuf.Timestamp last_update_time = 5 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp last_update_time = 5; } diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/workflow/v1/message.proto b/sdk-core-protos/protos/api_upstream/temporal/api/workflow/v1/message.proto index bb83f40bf..82a45dff2 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/workflow/v1/message.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/workflow/v1/message.proto @@ -34,8 +34,6 @@ option csharp_namespace = "Temporalio.Api.Workflow.V1"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - import "temporal/api/enums/v1/workflow.proto"; import "temporal/api/common/v1/message.proto"; import "temporal/api/failure/v1/message.proto"; @@ -44,13 +42,13 @@ import "temporal/api/taskqueue/v1/message.proto"; message WorkflowExecutionInfo { temporal.api.common.v1.WorkflowExecution execution = 1; temporal.api.common.v1.WorkflowType type = 2; - google.protobuf.Timestamp start_time = 3 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp close_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp start_time = 3; + google.protobuf.Timestamp close_time = 4; temporal.api.enums.v1.WorkflowExecutionStatus status = 5; int64 history_length = 6; string parent_namespace_id = 7; temporal.api.common.v1.WorkflowExecution parent_execution = 8; - google.protobuf.Timestamp execution_time = 9 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp execution_time = 9; temporal.api.common.v1.Memo memo = 10; temporal.api.common.v1.SearchAttributes search_attributes = 11; ResetPoints 
auto_reset_points = 12; @@ -63,9 +61,9 @@ message WorkflowExecutionInfo { message WorkflowExecutionConfig { temporal.api.taskqueue.v1.TaskQueue task_queue = 1; - google.protobuf.Duration workflow_execution_timeout = 2 [(gogoproto.stdduration) = true]; - google.protobuf.Duration workflow_run_timeout = 3 [(gogoproto.stdduration) = true]; - google.protobuf.Duration default_workflow_task_timeout = 4 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 2; + google.protobuf.Duration workflow_run_timeout = 3; + google.protobuf.Duration default_workflow_task_timeout = 4; } message PendingActivityInfo { @@ -73,12 +71,12 @@ message PendingActivityInfo { temporal.api.common.v1.ActivityType activity_type = 2; temporal.api.enums.v1.PendingActivityState state = 3; temporal.api.common.v1.Payloads heartbeat_details = 4; - google.protobuf.Timestamp last_heartbeat_time = 5 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp last_started_time = 6 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp last_heartbeat_time = 5; + google.protobuf.Timestamp last_started_time = 6; int32 attempt = 7; int32 maximum_attempts = 8; - google.protobuf.Timestamp scheduled_time = 9 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp expiration_time = 10 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp scheduled_time = 9; + google.protobuf.Timestamp expiration_time = 10; temporal.api.failure.v1.Failure last_failure = 11; string last_worker_identity = 12; } @@ -94,13 +92,13 @@ message PendingChildExecutionInfo { message PendingWorkflowTaskInfo { temporal.api.enums.v1.PendingWorkflowTaskState state = 1; - google.protobuf.Timestamp scheduled_time = 2 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp scheduled_time = 2; // original_scheduled_time is the scheduled time of the first workflow task during workflow task heartbeat. 
// Heartbeat workflow task is done by RespondWorkflowTaskComplete with ForceCreateNewWorkflowTask == true and no command // In this case, OriginalScheduledTime won't change. Then when current time - original_scheduled_time exceeds // some threshold, the workflow task will be forced timeout. - google.protobuf.Timestamp original_scheduled_time = 3 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp started_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp original_scheduled_time = 3; + google.protobuf.Timestamp started_time = 4; int32 attempt = 5; } @@ -116,11 +114,11 @@ message ResetPointInfo { string run_id = 2; // Event ID of the first WorkflowTaskCompleted event processed by this worker build. int64 first_workflow_task_completed_id = 3; - google.protobuf.Timestamp create_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp create_time = 4; // (-- api-linter: core::0214::resource-expiry=disabled // aip.dev/not-precedent: TTL is not defined for ResetPointInfo. --) // The time that the run is deleted due to retention. - google.protobuf.Timestamp expire_time = 5 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp expire_time = 5; // false if the reset point has pending childWFs/reqCancels/signalExternals. bool resettable = 6; } @@ -134,11 +132,11 @@ message NewWorkflowExecutionInfo { // Serialized arguments to the workflow. temporal.api.common.v1.Payloads input = 4; // Total workflow execution timeout including retries and continue as new. - google.protobuf.Duration workflow_execution_timeout = 5 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 5; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 6; // Timeout of a single workflow task. 
- google.protobuf.Duration workflow_task_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 7; // Default: WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE. temporal.api.enums.v1.WorkflowIdReusePolicy workflow_id_reuse_policy = 8; // The retry policy for the workflow. Will never exceed `workflow_execution_timeout`. diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto b/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto index ffdf7829c..0785ae07a 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/request_response.proto @@ -39,6 +39,7 @@ import "temporal/api/enums/v1/common.proto"; import "temporal/api/enums/v1/query.proto"; import "temporal/api/enums/v1/reset.proto"; import "temporal/api/enums/v1/task_queue.proto"; +import "temporal/api/enums/v1/update.proto"; import "temporal/api/common/v1/message.proto"; import "temporal/api/history/v1/message.proto"; import "temporal/api/workflow/v1/message.proto"; @@ -59,13 +60,11 @@ import "temporal/api/sdk/v1/task_complete_metadata.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; -import "dependencies/gogoproto/gogo.proto"; - message RegisterNamespaceRequest { string namespace = 1; string description = 2; string owner_email = 3; - google.protobuf.Duration workflow_execution_retention_period = 4 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_retention_period = 4; repeated temporal.api.replication.v1.ClusterReplicationConfig clusters = 5; string active_cluster_name = 6; // A key-value map for any customized purpose. @@ -151,11 +150,11 @@ message StartWorkflowExecutionRequest { // Serialized arguments to the workflow. These are passed as arguments to the workflow function. 
temporal.api.common.v1.Payloads input = 5; // Total workflow execution timeout including retries and continue as new. - google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 6; // Timeout of a single workflow run. - google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 7; // Timeout of a single workflow task. - google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 8; // The identity of the client who initiated this request string identity = 9; // A unique identifier for this start request. Typically UUIDv4. @@ -183,7 +182,7 @@ message StartWorkflowExecutionRequest { // Time to wait before dispatching the first workflow task. Cannot be used with `cron_schedule`. // If the workflow gets a signal before the delay, a workflow task will be dispatched and the rest // of the delay will be ignored. - google.protobuf.Duration workflow_start_delay = 20 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_start_delay = 20; } message StartWorkflowExecutionResponse { @@ -256,7 +255,9 @@ message PollWorkflowTaskQueueResponse { // Will be zero if no task has ever started. int64 previous_started_event_id = 4; // The id of the most recent workflow task started event, which will have been generated as a - // result of this poll request being served. + // result of this poll request being served. Will be zero if the task + // does not contain any events which would advance history (no new WFT started). + // Currently this can happen for queries. int64 started_event_id = 5; // Starting at 1, the number of attempts to complete this task by any worker. int32 attempt = 6; @@ -279,9 +280,9 @@ message PollWorkflowTaskQueueResponse { // for the queue, even if this response came from polling a sticky queue. 
temporal.api.taskqueue.v1.TaskQueue workflow_execution_task_queue = 11; // When this task was scheduled by the server - google.protobuf.Timestamp scheduled_time = 12 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp scheduled_time = 12; // When the current workflow task started event was generated, meaning the current attempt. - google.protobuf.Timestamp started_time = 13 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp started_time = 13; // Queries that should be executed after applying the history in this task. Responses should be // attached to `RespondWorkflowTaskCompletedRequest::query_results` map queries = 14; @@ -394,25 +395,25 @@ message PollActivityTaskQueueResponse { // was delivered. temporal.api.common.v1.Payloads heartbeat_details = 9; // When was this task first scheduled - google.protobuf.Timestamp scheduled_time = 10 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp scheduled_time = 10; // When was this task attempt scheduled - google.protobuf.Timestamp current_attempt_scheduled_time = 11 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp current_attempt_scheduled_time = 11; // When was this task started (this attempt) - google.protobuf.Timestamp started_time = 12 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp started_time = 12; // Starting at 1, the number of attempts to perform this activity int32 attempt = 13; // First scheduled -> final result reported timeout // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. --) - google.protobuf.Duration schedule_to_close_timeout = 14 [(gogoproto.stdduration) = true]; + google.protobuf.Duration schedule_to_close_timeout = 14; // Current attempt start -> final result reported timeout // // (-- api-linter: core::0140::prepositions=disabled // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - google.protobuf.Duration start_to_close_timeout = 15 [(gogoproto.stdduration) = true]; + google.protobuf.Duration start_to_close_timeout = 15; // Window within which the activity must report a heartbeat, or be timed out. - google.protobuf.Duration heartbeat_timeout = 16 [(gogoproto.stdduration) = true]; + google.protobuf.Duration heartbeat_timeout = 16; // This is the retry policy the service uses which may be different from the one provided // (or not) during activity scheduling. The service can override the provided one if some // values are not specified or exceed configured system limits. @@ -621,11 +622,11 @@ message SignalWithStartWorkflowExecutionRequest { // Serialized arguments to the workflow. These are passed as arguments to the workflow function. temporal.api.common.v1.Payloads input = 5; // Total workflow execution timeout including retries and continue as new - google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_execution_timeout = 6; // Timeout of a single workflow run - google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 7; // Timeout of a single workflow task - google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_task_timeout = 8; // The identity of the worker/client string identity = 9; // Used to de-dupe signal w/ start requests @@ -650,7 +651,7 @@ message SignalWithStartWorkflowExecutionRequest { // or not set, a workflow task will be dispatched immediately and the rest of the delay period // will be ignored, even if that request also had a delay. Signal via SignalWorkflowExecution // will not unblock the workflow. 
- google.protobuf.Duration workflow_start_delay = 20 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_start_delay = 20; // Indicates that a new workflow task should not be generated when this signal is received. bool skip_generate_workflow_task = 21; } @@ -668,7 +669,7 @@ message ResetWorkflowExecutionRequest { int64 workflow_task_finish_event_id = 4; // Used to de-dupe reset requests string request_id = 5; - // Reset reapplay(replay) options. + // Reset reapply (replay) options. temporal.api.enums.v1.ResetReapplyType reset_reapply_type = 6; } @@ -781,7 +782,21 @@ message CountWorkflowExecutionsRequest { } message CountWorkflowExecutionsResponse { + // If `query` is not grouping by any field, the count is an approximate number + // of workflows that matches the query. + // If `query` is grouping by a field, the count is simply the sum of the counts + // of the groups returned in the response. This number can be smaller than the + // total number of workflows matching the query. int64 count = 1; + + // `groups` contains the groups if the request is grouping by a field. + // The list might not be complete, and the counts of each group is approximate. + repeated AggregationGroup groups = 2; + + message AggregationGroup { + repeated temporal.api.common.v1.Payload group_values = 1; + int64 count = 2; + } } message GetSearchAttributesRequest { @@ -841,6 +856,7 @@ message DescribeWorkflowExecutionResponse { message DescribeTaskQueueRequest { string namespace = 1; temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + // If unspecified (TASK_QUEUE_TYPE_UNSPECIFIED), then default value (TASK_QUEUE_TYPE_WORKFLOW) will be used. 
temporal.api.enums.v1.TaskQueueType task_queue_type = 3; bool include_task_queue_status = 4; } @@ -912,6 +928,10 @@ message GetSystemInfoResponse { // True if the server knows about the sdk metadata field on WFT completions and will record // it in history bool sdk_metadata = 9; + + // True if the server supports count group by execution status + // (-- api-linter: core::0140::prepositions=disabled --) + bool count_group_by_execution_status = 10; } } @@ -1028,12 +1048,12 @@ message ListScheduleMatchingTimesRequest { // The id of the schedule to query. string schedule_id = 2; // Time range to query. - google.protobuf.Timestamp start_time = 3 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp end_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp start_time = 3; + google.protobuf.Timestamp end_time = 4; } message ListScheduleMatchingTimesResponse { - repeated google.protobuf.Timestamp start_time = 1 [(gogoproto.stdtime) = true]; + repeated google.protobuf.Timestamp start_time = 1; } // (-- api-linter: core::0135::request-name-required=disabled @@ -1126,9 +1146,8 @@ message UpdateWorkerBuildIdCompatibilityRequest { } } message UpdateWorkerBuildIdCompatibilityResponse { - // The id of the compatible set that the updated version was added to, or exists in. Users don't - // need to understand or care about this value, but it has value for debugging purposes. - string version_set_id = 1; + reserved 1; + reserved "version_set_id"; } // (-- api-linter: core::0134::request-resource-required=disabled @@ -1218,21 +1237,40 @@ message UpdateWorkflowExecutionResponse { // has completed. If this response is being returned before the update has // completed then this field will not be set. 
temporal.api.update.v1.Outcome outcome = 2; + + // The most advanced lifecycle stage that the Update is known to have + // reached, where lifecycle stages are ordered + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED. + // UNSPECIFIED will be returned if and only if the server's maximum wait + // time was reached before the Update reached the stage specified in the + // request WaitPolicy, and before the context deadline expired; clients may + // may then retry the call as needed. + temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage stage = 3; } message StartBatchOperationRequest { // Namespace that contains the batch operation string namespace = 1; // Visibility query defines the the group of workflow to apply the batch operation - // This field and Executions are mutually exclusive + // This field and `executions` are mutually exclusive string visibility_query = 2; // Job ID defines the unique ID for the batch job string job_id = 3; // Reason to perform the batch operation string reason = 4; // Executions to apply the batch operation - // This field and VisibilityQuery are mutually exclusive + // This field and `visibility_query` are mutually exclusive repeated temporal.api.common.v1.WorkflowExecution executions = 5; + // Limit for the number of operations processed per second within this batch. + // Its purpose is to reduce the stress on the system caused by batch operations, which helps to prevent system + // overload and minimize potential delays in executing ongoing tasks for user workers. + // Note that when no explicit limit is provided, the server will operate according to its limit defined by the + // dynamic configuration key `worker.batcherRPS`. This also applies if the value in this field exceeds the + // server's configured limit. 
+ float max_operations_per_second = 6; // Operation input oneof operation { temporal.api.batch.v1.BatchOperationTermination termination_operation = 10; @@ -1275,9 +1313,9 @@ message DescribeBatchOperationResponse { // Batch operation state temporal.api.enums.v1.BatchOperationState state = 3; // Batch operation start time - google.protobuf.Timestamp start_time = 4 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp start_time = 4; // Batch operation close time - google.protobuf.Timestamp close_time = 5 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp close_time = 5; // Total operation count int64 total_operation_count = 6; // Complete operation count @@ -1314,7 +1352,8 @@ message PollWorkflowExecutionUpdateRequest { temporal.api.update.v1.UpdateRef update_ref = 2; // The identity of the worker/client who is polling this update outcome string identity = 3; - // Describes when this poll request should return a response + // Describes when this poll request should return a response. + // Omit to request a non-blocking poll. temporal.api.update.v1.WaitPolicy wait_policy = 4; } @@ -1325,4 +1364,17 @@ message PollWorkflowExecutionUpdateResponse { // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED) then this field will // not be set. temporal.api.update.v1.Outcome outcome = 1; + // The most advanced lifecycle stage that the Update is known to have + // reached, where lifecycle stages are ordered + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED. + // UNSPECIFIED will be returned if and only if the server's maximum wait + // time was reached before the Update reached the stage specified in the + // request WaitPolicy, and before the context deadline expired; clients may + // may then retry the call as needed. 
+ temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage stage = 2; + // Sufficient information to address this update. + temporal.api.update.v1.UpdateRef update_ref = 3; } diff --git a/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/service.proto b/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/service.proto index b26b8f647..2bd2cf3e8 100644 --- a/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/service.proto +++ b/sdk-core-protos/protos/api_upstream/temporal/api/workflowservice/v1/service.proto @@ -33,6 +33,7 @@ option csharp_namespace = "Temporalio.Api.WorkflowService.V1"; import "temporal/api/workflowservice/v1/request_response.proto"; +import "google/api/annotations.proto"; // WorkflowService API defines how Temporal SDKs and other clients interact with the Temporal server // to create and interact with workflows and activities. @@ -54,24 +55,33 @@ service WorkflowService { // isolation for all resources within the namespace. All resources belongs to exactly one // namespace. rpc RegisterNamespace (RegisterNamespaceRequest) returns (RegisterNamespaceResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces" + body: "*" + }; } // DescribeNamespace returns the information and configuration for a registered namespace. rpc DescribeNamespace (DescribeNamespaceRequest) returns (DescribeNamespaceResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}" + }; } // ListNamespaces returns the information and configuration for all namespaces. rpc ListNamespaces (ListNamespacesRequest) returns (ListNamespacesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces" + }; } // UpdateNamespace is used to update the information and configuration of a registered // namespace. - // - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. 
--) - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. --) rpc UpdateNamespace (UpdateNamespaceRequest) returns (UpdateNamespaceResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/update" + body: "*" + }; } // DeprecateNamespace is used to update the state of a registered namespace to DEPRECATED. @@ -79,6 +89,9 @@ service WorkflowService { // Once the namespace is deprecated it cannot be used to start new workflow executions. Existing // workflow executions will continue to run on deprecated namespaces. // Deprecated. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Deprecated --) rpc DeprecateNamespace (DeprecateNamespaceRequest) returns (DeprecateNamespaceResponse) { } @@ -88,17 +101,27 @@ service WorkflowService { // also schedule the first workflow task. Returns `WorkflowExecutionAlreadyStarted`, if an // instance already exists with same workflow id. rpc StartWorkflowExecution (StartWorkflowExecutionRequest) returns (StartWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_id}" + body: "*" + }; } // GetWorkflowExecutionHistory returns the history of specified workflow execution. Fails with // `NotFound` if the specified workflow execution is unknown to the service. rpc GetWorkflowExecutionHistory (GetWorkflowExecutionHistoryRequest) returns (GetWorkflowExecutionHistoryResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}/history" + }; } // GetWorkflowExecutionHistoryReverse returns the history of specified workflow execution in reverse // order (starting from last event). Fails with`NotFound` if the specified workflow execution is // unknown to the service. 
rpc GetWorkflowExecutionHistoryReverse (GetWorkflowExecutionHistoryReverseRequest) returns (GetWorkflowExecutionHistoryReverseResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}/history-reverse" + }; } // PollWorkflowTaskQueue is called by workers to make progress on workflows. @@ -107,6 +130,9 @@ service WorkflowService { // tasks. The worker is expected to call `RespondWorkflowTaskCompleted` when it is done // processing the task. The service will create a `WorkflowTaskStarted` event in the history for // this task before handing it to the worker. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) rpc PollWorkflowTaskQueue (PollWorkflowTaskQueueRequest) returns (PollWorkflowTaskQueueResponse) { } @@ -116,6 +142,9 @@ service WorkflowService { // Completing a WorkflowTask will write a `WORKFLOW_TASK_COMPLETED` event to the workflow's // history, along with events corresponding to whatever commands the SDK generated while // executing the task (ex timer started, activity task scheduled, etc). + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) rpc RespondWorkflowTaskCompleted (RespondWorkflowTaskCompletedRequest) returns (RespondWorkflowTaskCompletedResponse) { } @@ -128,6 +157,9 @@ service WorkflowService { // // Temporal will only append first WorkflowTaskFailed event to the history of workflow execution // for consecutive failures. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. 
--) rpc RespondWorkflowTaskFailed (RespondWorkflowTaskFailedRequest) returns (RespondWorkflowTaskFailedResponse) { } @@ -143,6 +175,9 @@ service WorkflowService { // (`ACTIVITY_TASK_COMPLETED` / `ACTIVITY_TASK_FAILED` / `ACTIVITY_TASK_TIMED_OUT`) will both be // written permanently to Workflow execution history when Activity is finished. This is done to // avoid writing many events in the case of a failure/retry loop. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) rpc PollActivityTaskQueue (PollActivityTaskQueueRequest) returns (PollActivityTaskQueueResponse) { } @@ -153,6 +188,10 @@ service WorkflowService { // the workflow history. Calling `RecordActivityTaskHeartbeat` will fail with `NotFound` in // such situations, in that event, the SDK should request cancellation of the activity. rpc RecordActivityTaskHeartbeat (RecordActivityTaskHeartbeatRequest) returns (RecordActivityTaskHeartbeatResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/heartbeat" + body: "*" + }; } // See `RecordActivityTaskHeartbeat`. This version allows clients to record heartbeats by @@ -161,6 +200,10 @@ service WorkflowService { // (-- api-linter: core::0136::prepositions=disabled // aip.dev/not-precedent: "By" is used to indicate request type. --) rpc RecordActivityTaskHeartbeatById (RecordActivityTaskHeartbeatByIdRequest) returns (RecordActivityTaskHeartbeatByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/heartbeat-by-id" + body: "*" + }; } // RespondActivityTaskCompleted is called by workers when they successfully complete an activity @@ -170,6 +213,10 @@ service WorkflowService { // and a new workflow task created for the workflow. Fails with `NotFound` if the task token is // no longer valid due to activity timeout, already being completed, or never having existed. 
rpc RespondActivityTaskCompleted (RespondActivityTaskCompletedRequest) returns (RespondActivityTaskCompletedResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/complete" + body: "*" + }; } // See `RecordActivityTaskCompleted`. This version allows clients to record completions by @@ -178,6 +225,10 @@ service WorkflowService { // (-- api-linter: core::0136::prepositions=disabled // aip.dev/not-precedent: "By" is used to indicate request type. --) rpc RespondActivityTaskCompletedById (RespondActivityTaskCompletedByIdRequest) returns (RespondActivityTaskCompletedByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/complete-by-id" + body: "*" + }; } // RespondActivityTaskFailed is called by workers when processing an activity task fails. @@ -186,6 +237,10 @@ service WorkflowService { // a new workflow task created for the workflow. Fails with `NotFound` if the task token is no // longer valid due to activity timeout, already being completed, or never having existed. rpc RespondActivityTaskFailed (RespondActivityTaskFailedRequest) returns (RespondActivityTaskFailedResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/fail" + body: "*" + }; } // See `RecordActivityTaskFailed`. This version allows clients to record failures by @@ -194,6 +249,10 @@ service WorkflowService { // (-- api-linter: core::0136::prepositions=disabled // aip.dev/not-precedent: "By" is used to indicate request type. --) rpc RespondActivityTaskFailedById (RespondActivityTaskFailedByIdRequest) returns (RespondActivityTaskFailedByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/fail-by-id" + body: "*" + }; } // RespondActivityTaskFailed is called by workers when processing an activity task fails. @@ -202,6 +261,10 @@ service WorkflowService { // and a new workflow task created for the workflow. 
Fails with `NotFound` if the task token is // no longer valid due to activity timeout, already being completed, or never having existed. rpc RespondActivityTaskCanceled (RespondActivityTaskCanceledRequest) returns (RespondActivityTaskCanceledResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/cancel" + body: "*" + }; } // See `RecordActivityTaskCanceled`. This version allows clients to record failures by @@ -210,6 +273,10 @@ service WorkflowService { // (-- api-linter: core::0136::prepositions=disabled // aip.dev/not-precedent: "By" is used to indicate request type. --) rpc RespondActivityTaskCanceledById (RespondActivityTaskCanceledByIdRequest) returns (RespondActivityTaskCanceledByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/cancel-by-id" + body: "*" + }; } // RequestCancelWorkflowExecution is called by workers when they want to request cancellation of @@ -219,6 +286,10 @@ service WorkflowService { // workflow history and a new workflow task created for the workflow. It returns success if the requested // workflow is already closed. It fails with 'NotFound' if the requested workflow doesn't exist. rpc RequestCancelWorkflowExecution (RequestCancelWorkflowExecutionRequest) returns (RequestCancelWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/cancel" + body: "*" + }; } // SignalWorkflowExecution is used to send a signal to a running workflow execution. @@ -226,6 +297,10 @@ service WorkflowService { // This results in a `WORKFLOW_EXECUTION_SIGNALED` event recorded in the history and a workflow // task being created for the execution. 
rpc SignalWorkflowExecution (SignalWorkflowExecutionRequest) returns (SignalWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/signal/{signal_name}" + body: "*" + }; } // SignalWithStartWorkflowExecution is used to ensure a signal is sent to a workflow, even if @@ -241,6 +316,10 @@ service WorkflowService { // (-- api-linter: core::0136::prepositions=disabled // aip.dev/not-precedent: "With" is used to indicate combined operation. --) rpc SignalWithStartWorkflowExecution (SignalWithStartWorkflowExecutionRequest) returns (SignalWithStartWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_id}/signal-with-start/{signal_name}" + body: "*" + }; } // ResetWorkflowExecution will reset an existing workflow execution to a specified @@ -248,60 +327,86 @@ service WorkflowService { // execution instance. // TODO: Does exclusive here mean *just* the completed event, or also WFT started? Otherwise the task is doomed to time out? rpc ResetWorkflowExecution (ResetWorkflowExecutionRequest) returns (ResetWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/reset" + body: "*" + }; } // TerminateWorkflowExecution terminates an existing workflow execution by recording a // `WORKFLOW_EXECUTION_TERMINATED` event in the history and immediately terminating the // execution instance. rpc TerminateWorkflowExecution (TerminateWorkflowExecutionRequest) returns (TerminateWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/terminate" + body: "*" + }; } // DeleteWorkflowExecution asynchronously deletes a specific Workflow Execution (when // WorkflowExecution.run_id is provided) or the latest Workflow Execution (when // WorkflowExecution.run_id is not provided). 
If the Workflow Execution is Running, it will be // terminated before deletion. - // (-- api-linter: core::0135::method-signature=disabled - // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) - // (-- api-linter: core::0135::response-message-name=disabled - // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) - rpc DeleteWorkflowExecution (DeleteWorkflowExecutionRequest) returns (DeleteWorkflowExecutionResponse) { - } + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Workflow deletion not exposed to HTTP, users should use cancel or terminate. --) + rpc DeleteWorkflowExecution (DeleteWorkflowExecutionRequest) returns (DeleteWorkflowExecutionResponse) {} // ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific namespace. - rpc ListOpenWorkflowExecutions (ListOpenWorkflowExecutionsRequest) returns (ListOpenWorkflowExecutionsResponse) { - } + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) + rpc ListOpenWorkflowExecutions (ListOpenWorkflowExecutionsRequest) returns (ListOpenWorkflowExecutionsResponse) {} // ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific namespace. - rpc ListClosedWorkflowExecutions (ListClosedWorkflowExecutionsRequest) returns (ListClosedWorkflowExecutionsResponse) { - } + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) + rpc ListClosedWorkflowExecutions (ListClosedWorkflowExecutionsRequest) returns (ListClosedWorkflowExecutionsResponse) {} // ListWorkflowExecutions is a visibility API to list workflow executions in a specific namespace. 
rpc ListWorkflowExecutions (ListWorkflowExecutionsRequest) returns (ListWorkflowExecutionsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows" + }; } // ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific namespace. rpc ListArchivedWorkflowExecutions (ListArchivedWorkflowExecutionsRequest) returns (ListArchivedWorkflowExecutionsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/archived-workflows" + }; } // ScanWorkflowExecutions is a visibility API to list large amount of workflow executions in a specific namespace without order. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) rpc ScanWorkflowExecutions (ScanWorkflowExecutionsRequest) returns (ScanWorkflowExecutionsResponse) { } // CountWorkflowExecutions is a visibility API to count of workflow executions in a specific namespace. rpc CountWorkflowExecutions (CountWorkflowExecutionsRequest) returns (CountWorkflowExecutionsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflow-count" + }; } // GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs - rpc GetSearchAttributes (GetSearchAttributesRequest) returns (GetSearchAttributesResponse) { - } + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this search attribute API to HTTP (but may expose on OperatorService). --) + rpc GetSearchAttributes (GetSearchAttributesRequest) returns (GetSearchAttributesResponse) {} // RespondQueryTaskCompleted is called by workers to complete queries which were delivered on // the `query` (not `queries`) field of a `PollWorkflowTaskQueueResponse`. // // Completing the query will unblock the corresponding client call to `QueryWorkflow` and return // the query result a response. 
- rpc RespondQueryTaskCompleted (RespondQueryTaskCompletedRequest) returns (RespondQueryTaskCompletedResponse) { - } + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) + rpc RespondQueryTaskCompleted (RespondQueryTaskCompletedRequest) returns (RespondQueryTaskCompletedResponse) {} // ResetStickyTaskQueue resets the sticky task queue related information in the mutable state of // a given workflow. This is prudent for workers to perform if a workflow has been paged out of @@ -310,74 +415,103 @@ service WorkflowService { // Things cleared are: // 1. StickyTaskQueue // 2. StickyScheduleToStartTimeout + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) rpc ResetStickyTaskQueue (ResetStickyTaskQueueRequest) returns (ResetStickyTaskQueueResponse) { } // QueryWorkflow requests a query be executed for a specified workflow execution. rpc QueryWorkflow (QueryWorkflowRequest) returns (QueryWorkflowResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}/query/{query.query_type}" + body: "*" + }; } // DescribeWorkflowExecution returns information about the specified workflow execution. rpc DescribeWorkflowExecution (DescribeWorkflowExecutionRequest) returns (DescribeWorkflowExecutionResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}" + }; } // DescribeTaskQueue returns information about the target task queue. 
rpc DescribeTaskQueue (DescribeTaskQueueRequest) returns (DescribeTaskQueueResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/task-queues/{task_queue.name}" + }; } // GetClusterInfo returns information about temporal cluster - rpc GetClusterInfo(GetClusterInfoRequest) returns (GetClusterInfoResponse){ + rpc GetClusterInfo(GetClusterInfoRequest) returns (GetClusterInfoResponse) { + option (google.api.http) = { + get: "/api/v1/cluster-info" + }; } // GetSystemInfo returns information about the system. rpc GetSystemInfo(GetSystemInfoRequest) returns (GetSystemInfoResponse) { + option (google.api.http) = { + get: "/api/v1/system-info" + }; } + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this low-level API to HTTP. --) rpc ListTaskQueuePartitions(ListTaskQueuePartitionsRequest) returns (ListTaskQueuePartitionsResponse) { } // Creates a new schedule. - // (-- api-linter: core::0133::method-signature=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) - // (-- api-linter: core::0133::response-message-name=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) - // (-- api-linter: core::0133::http-uri-parent=disabled - // aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) rpc CreateSchedule (CreateScheduleRequest) returns (CreateScheduleResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}" + body: "*" + }; } // Returns the schedule description and current state of an existing schedule. rpc DescribeSchedule (DescribeScheduleRequest) returns (DescribeScheduleResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}" + }; } // Changes the configuration or state of an existing schedule. 
- // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateSchedule RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateSchedule RPC doesn't follow Google API format. --) rpc UpdateSchedule (UpdateScheduleRequest) returns (UpdateScheduleResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}/update" + body: "*" + }; } // Makes a specific change to a schedule or triggers an immediate action. - // (-- api-linter: core::0134::synonyms=disabled - // aip.dev/not-precedent: we have both patch and update. --) rpc PatchSchedule (PatchScheduleRequest) returns (PatchScheduleResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}/patch" + body: "*" + }; } // Lists matching times within a range. rpc ListScheduleMatchingTimes (ListScheduleMatchingTimesRequest) returns (ListScheduleMatchingTimesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}/matching-times" + }; } // Deletes a schedule, removing it from the system. - // (-- api-linter: core::0135::method-signature=disabled - // aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) - // (-- api-linter: core::0135::response-message-name=disabled - // aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) rpc DeleteSchedule (DeleteScheduleRequest) returns (DeleteScheduleResponse) { + option (google.api.http) = { + delete: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}" + }; } // List all schedules in a namespace. rpc ListSchedules (ListSchedulesRequest) returns (ListSchedulesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/schedules" + }; } // Allows users to specify sets of worker build id versions on a per task queue basis. 
Versions @@ -393,13 +527,16 @@ service WorkflowService { // NOTE: The number of task queues mapped to a single build id is limited by the `limit.taskQueuesPerBuildId` // (default is 20), if this limit is exceeded this API will error with a FailedPrecondition. // - // (-- api-linter: core::0134::response-message-name=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) - // (-- api-linter: core::0134::method-signature=disabled - // aip.dev/not-precedent: UpdateWorkerBuildIdCompatibility RPC doesn't follow Google API format. --) + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not yet expose versioning API to HTTP. --) rpc UpdateWorkerBuildIdCompatibility (UpdateWorkerBuildIdCompatibilityRequest) returns (UpdateWorkerBuildIdCompatibilityResponse) {} + // Fetches the worker build id versioning sets for a task queue. - rpc GetWorkerBuildIdCompatibility (GetWorkerBuildIdCompatibilityRequest) returns (GetWorkerBuildIdCompatibilityResponse) {} + rpc GetWorkerBuildIdCompatibility (GetWorkerBuildIdCompatibilityRequest) returns (GetWorkerBuildIdCompatibilityResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/task-queues/{task_queue}/worker-build-id-compatibility" + }; + } // Fetches task reachability to determine whether a worker may be retired. // The request may specify task queues to query for or let the server fetch all task queues mapped to the given @@ -413,36 +550,57 @@ service WorkflowService { // // Open source users can adjust this limit by setting the server's dynamic config value for // `limit.reachabilityTaskQueueScan` with the caveat that this call can strain the visibility store. 
- rpc GetWorkerTaskReachability (GetWorkerTaskReachabilityRequest) returns (GetWorkerTaskReachabilityResponse) {} + rpc GetWorkerTaskReachability (GetWorkerTaskReachabilityRequest) returns (GetWorkerTaskReachabilityResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/worker-task-reachability" + }; + } // Invokes the specified update function on user workflow code. - // (-- api-linter: core::0134=disabled - // aip.dev/not-precedent: UpdateWorkflowExecution doesn't follow Google API format --) rpc UpdateWorkflowExecution(UpdateWorkflowExecutionRequest) returns (UpdateWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/update/{request.input.name}" + body: "*" + }; } // Polls a workflow execution for the outcome of a workflow execution update // previously issued through the UpdateWorkflowExecution RPC. The effective // timeout on this call will be shorter of the the caller-supplied gRPC // timeout and the server's configured long-poll timeout. - // (-- api-linter: core::0134=disabled - // aip.dev/not-precedent: UpdateWorkflowExecution doesn't follow Google API format --) - rpc PollWorkflowExecutionUpdate(PollWorkflowExecutionUpdateRequest) returns (PollWorkflowExecutionUpdateResponse){ + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We don't expose update polling API to HTTP in favor of a potential future non-blocking form. 
--) + rpc PollWorkflowExecutionUpdate(PollWorkflowExecutionUpdateRequest) returns (PollWorkflowExecutionUpdateResponse) { } // StartBatchOperation starts a new batch operation rpc StartBatchOperation(StartBatchOperationRequest) returns (StartBatchOperationResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/batch-operations/{job_id}" + body: "*" + }; } // StopBatchOperation stops a batch operation rpc StopBatchOperation(StopBatchOperationRequest) returns (StopBatchOperationResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/batch-operations/{job_id}/stop" + body: "*" + }; } // DescribeBatchOperation returns the information about a batch operation rpc DescribeBatchOperation(DescribeBatchOperationRequest) returns (DescribeBatchOperationResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/batch-operations/{job_id}" + }; } // ListBatchOperations returns a list of batch operations rpc ListBatchOperations(ListBatchOperationsRequest) returns (ListBatchOperationsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/batch-operations" + }; } } diff --git a/tests/integ_tests/workflow_tests/cancel_external.rs b/tests/integ_tests/workflow_tests/cancel_external.rs index a28f403de..c70909f9e 100644 --- a/tests/integ_tests/workflow_tests/cancel_external.rs +++ b/tests/integ_tests/workflow_tests/cancel_external.rs @@ -25,7 +25,7 @@ async fn cancel_sender(ctx: WfContext) -> WorkflowResult<()> { Ok(().into()) } -async fn cancel_receiver(mut ctx: WfContext) -> WorkflowResult<()> { +async fn cancel_receiver(ctx: WfContext) -> WorkflowResult<()> { ctx.cancelled().await; Ok(().into()) } diff --git a/tests/integ_tests/workflow_tests/cancel_wf.rs b/tests/integ_tests/workflow_tests/cancel_wf.rs index a29eb2bf8..b3f874979 100644 --- a/tests/integ_tests/workflow_tests/cancel_wf.rs +++ b/tests/integ_tests/workflow_tests/cancel_wf.rs @@ -4,7 +4,7 @@ use temporal_sdk::{WfContext, 
WfExitValue, WorkflowResult}; use temporal_sdk_core_protos::temporal::api::enums::v1::WorkflowExecutionStatus; use temporal_sdk_core_test_utils::CoreWfStarter; -async fn cancelled_wf(mut ctx: WfContext) -> WorkflowResult<()> { +async fn cancelled_wf(ctx: WfContext) -> WorkflowResult<()> { let cancelled = tokio::select! { _ = ctx.timer(Duration::from_secs(500)) => false, _ = ctx.cancelled() => true diff --git a/tests/integ_tests/workflow_tests/child_workflows.rs b/tests/integ_tests/workflow_tests/child_workflows.rs index 3e50b1352..6c716170a 100644 --- a/tests/integ_tests/workflow_tests/child_workflows.rs +++ b/tests/integ_tests/workflow_tests/child_workflows.rs @@ -64,7 +64,7 @@ async fn abandoned_child_bug_repro() { worker.register_wf( PARENT_WF_TYPE.to_string(), - move |mut ctx: WfContext| async move { + move |ctx: WfContext| async move { let child = ctx.child_workflow(ChildWorkflowOptions { workflow_id: "abandoned-child".to_owned(), workflow_type: CHILD_WF_TYPE.to_owned(), @@ -89,7 +89,7 @@ async fn abandoned_child_bug_repro() { Ok(().into()) }, ); - worker.register_wf(CHILD_WF_TYPE.to_string(), |mut ctx: WfContext| async move { + worker.register_wf(CHILD_WF_TYPE.to_string(), |ctx: WfContext| async move { ctx.cancelled().await; Ok(WfExitValue::<()>::Cancelled) }); @@ -135,7 +135,7 @@ async fn abandoned_child_resolves_post_cancel() { worker.register_wf( PARENT_WF_TYPE.to_string(), - move |mut ctx: WfContext| async move { + move |ctx: WfContext| async move { let child = ctx.child_workflow(ChildWorkflowOptions { workflow_id: "abandoned-child-resolve-post-cancel".to_owned(), workflow_type: CHILD_WF_TYPE.to_owned(),