From cd70817d73ea78a7442313325f5003920b01d6c4 Mon Sep 17 00:00:00 2001
From: morvencao
Date: Tue, 26 Mar 2024 06:58:07 +0000
Subject: [PATCH] support configuring the work driver

Signed-off-by: morvencao
---
 cmd/example/busybox/main.go           |   2 +-
 cmd/example/helloworld/main.go        |  14 +-
 cmd/example/helloworld_helm/main.go   |  14 +-
 cmd/example/helloworld_hosted/main.go |  14 +-
 go.mod                                |  43 +-
 go.sum                                | 127 +--
 pkg/addonmanager/manager.go           |  35 +-
 test/integration/suite_test.go        |   2 +-
 [diffstat for the remaining ~316 vendored files under vendor/: github.com/cloudevents/sdk-go (v2 plus the mqtt_paho protocol), github.com/eclipse/paho.golang, github.com/google/uuid, golang.org/x/*, google.golang.org/grpc and genproto, k8s.io/klog/v2, and the open-cluster-management.io/sdk-go pkg/cloudevents generic and work packages, plus assorted transitive updates]
 324 files changed, 24055 insertions(+), 3501 deletions(-)
 [create/delete/rename mode list for the vendored files above]
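All four example controllers below migrate the same way: construct an addonmanager.ManagerOptions, register its flags on the cobra command with AddFlags, and pass the options to addonmanager.New, which now takes them as a second argument. The following is a minimal, hypothetical consumer-side sketch of that pattern; the controller name, the clientcmd-based kubeconfig loading, and the mgr.Start call are assumptions drawn from the existing addon-framework examples, not something this patch adds.

package main

import (
	"context"
	"os"

	"github.com/spf13/cobra"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"

	"open-cluster-management.io/addon-framework/pkg/addonmanager"
)

func main() {
	// ManagerOptions (introduced by this patch) carries the manager configuration,
	// including the work driver settings; AddFlags exposes it on the command line.
	opts := addonmanager.NewManagerOptions()

	cmd := &cobra.Command{
		Use: "my-addon-controller", // hypothetical name, for illustration only
		RunE: func(cmd *cobra.Command, args []string) error {
			// Assumption: load the kubeconfig from the KUBECONFIG env var.
			kubeConfig, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
			if err != nil {
				return err
			}
			return run(cmd.Context(), kubeConfig, opts)
		},
	}
	opts.AddFlags(cmd) // new in this patch: registers the manager/work-driver flags

	if err := cmd.Execute(); err != nil {
		klog.Errorf("controller exited: %v", err)
		os.Exit(1)
	}
}

func run(ctx context.Context, kubeConfig *rest.Config, opts *addonmanager.ManagerOptions) error {
	// addonmanager.New now takes the options as a second argument.
	mgr, err := addonmanager.New(kubeConfig, opts)
	if err != nil {
		return err
	}
	// Register agent addons on mgr here before starting it.
	return mgr.Start(ctx)
}

Controllers that do not need to expose flags can pass addonmanager.NewManagerOptions() directly to New, as the busybox example below does.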
diff --git a/cmd/example/busybox/main.go b/cmd/example/busybox/main.go
index 8ff7a44d6..0e35f1cd0 100644
--- a/cmd/example/busybox/main.go
+++ b/cmd/example/busybox/main.go
@@ -24,7 +24,7 @@ func main() {
 	if err != nil {
 		os.Exit(1)
 	}
-	addonMgr, err := addonmanager.New(kubeConfig)
+	addonMgr, err := addonmanager.New(kubeConfig, addonmanager.NewManagerOptions())
 	if err != nil {
 		klog.Errorf("unable to setup addon manager: %v", err)
 		os.Exit(1)
diff --git a/cmd/example/helloworld/main.go b/cmd/example/helloworld/main.go
index b933922a5..3de46ecf4 100644
--- a/cmd/example/helloworld/main.go
+++ b/cmd/example/helloworld/main.go
@@ -66,22 +66,30 @@ func newCommand() *cobra.Command {
 }
 
 func newControllerCommand() *cobra.Command {
+	o := addonmanager.NewManagerOptions()
+	c := &addManagerConfig{managerOptions: o}
 	cmd := cmdfactory.
-		NewControllerCommandConfig("helloworld-addon-controller", version.Get(), runController).
+		NewControllerCommandConfig("helloworld-addon-controller", version.Get(), c.runController).
 		NewCommand()
 	cmd.Use = "controller"
 	cmd.Short = "Start the addon controller"
+	o.AddFlags(cmd)
 
 	return cmd
 }
 
-func runController(ctx context.Context, kubeConfig *rest.Config) error {
+// addManagerConfig holds configuration for addon manager
+type addManagerConfig struct {
+	managerOptions *addonmanager.ManagerOptions
+}
+
+func (c *addManagerConfig) runController(ctx context.Context, kubeConfig *rest.Config) error {
 	addonClient, err := addonv1alpha1client.NewForConfig(kubeConfig)
 	if err != nil {
 		return err
 	}
 
-	mgr, err := addonmanager.New(kubeConfig)
+	mgr, err := addonmanager.New(kubeConfig, c.managerOptions)
 	if err != nil {
 		return err
 	}
diff --git a/cmd/example/helloworld_helm/main.go b/cmd/example/helloworld_helm/main.go
index 099523226..4609c9513 100644
--- a/cmd/example/helloworld_helm/main.go
+++ b/cmd/example/helloworld_helm/main.go
@@ -69,16 +69,24 @@ func newCommand() *cobra.Command {
 }
 
 func newControllerCommand() *cobra.Command {
+	o := addonmanager.NewManagerOptions()
+	c := &addManagerConfig{managerOptions: o}
 	cmd := cmdfactory.
-		NewControllerCommandConfig("helloworldhelm-addon-controller", version.Get(), runController).
+		NewControllerCommandConfig("helloworldhelm-addon-controller", version.Get(), c.runController).
 		NewCommand()
 	cmd.Use = "controller"
 	cmd.Short = "Start the addon controller"
+	o.AddFlags(cmd)
 
 	return cmd
 }
 
-func runController(ctx context.Context, kubeConfig *rest.Config) error {
+// addManagerConfig holds configuration for addon manager
+type addManagerConfig struct {
+	managerOptions *addonmanager.ManagerOptions
+}
+
+func (c *addManagerConfig) runController(ctx context.Context, kubeConfig *rest.Config) error {
 	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
 	if err != nil {
 		return err
@@ -89,7 +97,7 @@ func runController(ctx context.Context, kubeConfig *rest.Config) error {
 		return err
 	}
 
-	mgr, err := addonmanager.New(kubeConfig)
+	mgr, err := addonmanager.New(kubeConfig, c.managerOptions)
 	if err != nil {
 		klog.Errorf("failed to new addon manager %v", err)
 		return err
diff --git a/cmd/example/helloworld_hosted/main.go b/cmd/example/helloworld_hosted/main.go
index 5a4376f31..21bf9545c 100644
--- a/cmd/example/helloworld_hosted/main.go
+++ b/cmd/example/helloworld_hosted/main.go
@@ -67,17 +67,25 @@ func newCommand() *cobra.Command {
 }
 
 func newControllerCommand() *cobra.Command {
+	o := addonmanager.NewManagerOptions()
+	c := &addManagerConfig{managerOptions: o}
 	cmd := cmdfactory.
-		NewControllerCommandConfig("helloworld-addon-controller", version.Get(), runController).
+		NewControllerCommandConfig("helloworld-addon-controller", version.Get(), c.runController).
 		NewCommand()
 	cmd.Use = "controller"
 	cmd.Short = "Start the addon controller"
+	o.AddFlags(cmd)
 
 	return cmd
 }
 
-func runController(ctx context.Context, kubeConfig *rest.Config) error {
-	mgr, err := addonmanager.New(kubeConfig)
+// addManagerConfig holds configuration for addon manager
+type addManagerConfig struct {
+	managerOptions *addonmanager.ManagerOptions
+}
+
+func (c *addManagerConfig) runController(ctx context.Context, kubeConfig *rest.Config) error {
+	mgr, err := addonmanager.New(kubeConfig, c.managerOptions)
 	if err != nil {
 		return err
 	}
diff --git a/go.mod b/go.mod
index 92d7688bd..c260f15d0 100644
--- a/go.mod
+++ b/go.mod
@@ -6,23 +6,23 @@ require (
 	github.com/evanphx/json-patch v5.7.0+incompatible
 	github.com/fatih/structs v1.1.0
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/gomega v1.30.0
-	github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533
+	github.com/onsi/gomega v1.31.1
+	github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8
 	github.com/openshift/library-go v0.0.0-20240116081341-964bcb3f545c
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.4
 	helm.sh/helm/v3 v3.14.2
-	k8s.io/api v0.29.1
+	k8s.io/api v0.29.2
 	k8s.io/apiextensions-apiserver v0.29.0
-	k8s.io/apimachinery v0.29.1
+	k8s.io/apimachinery v0.29.2
 	k8s.io/apiserver v0.29.0
-	k8s.io/client-go v0.29.1
+	k8s.io/client-go v0.29.2
 	k8s.io/component-base v0.29.1
-	k8s.io/klog/v2 v2.110.1
-	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+	k8s.io/klog/v2 v2.120.1
+	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
 	open-cluster-management.io/api v0.13.0
-	open-cluster-management.io/sdk-go v0.13.0
+	open-cluster-management.io/sdk-go v0.13.1-0.20240321032811-7dbdd1b5c63d
 	sigs.k8s.io/controller-runtime v0.17.2
 )
 
@@ -38,10 +38,13 @@ require (
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect
+	github.com/cloudevents/sdk-go/v2 v2.14.0 // indirect
 	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/eclipse/paho.golang v0.11.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch/v5 v5.8.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
@@ -59,7 +62,7 @@ require (
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/uuid v1.3.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/huandu/xstrings v1.4.0 // indirect
@@ -101,20 +104,20 @@ require (
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.26.0 // indirect
-	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/crypto v0.19.0 // indirect
 	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
-	golang.org/x/net v0.19.0 // indirect
-	golang.org/x/oauth2 v0.12.0 // indirect
-	golang.org/x/sync v0.5.0 // indirect
-	golang.org/x/sys v0.16.0 // indirect
-	golang.org/x/term v0.15.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/oauth2 v0.16.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/sys v0.17.0 // indirect
+	golang.org/x/term v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
-	google.golang.org/grpc v1.58.3 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
+	google.golang.org/grpc v1.62.1 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
diff --git a/go.sum b/go.sum
index 58b344ab5..b84d8d577 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,6 @@
-cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q=
-cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
-cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
+cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
+cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
@@ -26,8 +26,12 @@ github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqy
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cespare/xxhash/v2 v2.2.0
h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 h1:pXyRKZ0T5WoB6X9QnHS5cEyW0Got39bNQIECxGUKVO4= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995/go.mod h1:mz9oS2Yhh/S7cvrrsgGMMR+6Shy0ZyL2lDN1sHQO1wE= +github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= +github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -41,10 +45,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eclipse/paho.golang v0.11.0 h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM= +github.com/eclipse/paho.golang v0.11.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= @@ -60,7 +66,6 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -83,18 +88,19 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= @@ -106,6 +112,7 @@ github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYu github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -115,10 +122,11 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -175,14 +183,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533 h1:mh3ZYs7kPIIe3UUY6tJcTExmtjnXXUu0MrBuK2W/Qvw= -github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8 h1:cu3YUMVGsKIyFyJGO3F6BZKGYQZpCKxAv9cBPgQAca8= +github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/library-go v0.0.0-20240116081341-964bcb3f545c h1:gLylEQQryG+A6nqWYIwE1wUzn1eFUmthjADvflMWKnM= github.com/openshift/library-go v0.0.0-20240116081341-964bcb3f545c/go.mod h1:82B0gt8XawdXWRtKMrm3jSMTeRsiOSYKCi4F0fvPjG0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -222,6 +230,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -229,6 +238,8 @@ github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -286,8 +297,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -295,7 +306,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -303,17 +313,18 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/net v0.21.0 
h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -328,17 +339,17 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -358,22 +369,24 @@ 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -398,30 +411,30 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= helm.sh/helm/v3 v3.14.2 h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA= helm.sh/helm/v3 v3.14.2/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI= k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= -open-cluster-management.io/sdk-go v0.13.0 h1:ddMGsPUekQr9z03tVN6vF39Uf+WEKMtGU/xSd81HdoA= -open-cluster-management.io/sdk-go v0.13.0/go.mod h1:UnsjzYOrDTF9a8rHEXksoIAtAdO1o5CD5Jtaw6T5B9w= +open-cluster-management.io/sdk-go v0.13.1-0.20240321032811-7dbdd1b5c63d h1:mWrwuvY3K/MLfOLbwJB6VSNsdDol98fvK3hHXuDlxfQ= +open-cluster-management.io/sdk-go v0.13.1-0.20240321032811-7dbdd1b5c63d/go.mod h1:sq+amR9Ls9JzMP5dypvlCx4jIGfDg45gicS67Z/MnlI= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= diff --git a/pkg/addonmanager/manager.go b/pkg/addonmanager/manager.go index b2f3575e5..39f5cc9e5 100644 --- a/pkg/addonmanager/manager.go +++ b/pkg/addonmanager/manager.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" @@ -34,6 +35,7 @@ import ( "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration" "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner" "open-cluster-management.io/addon-framework/pkg/utils" + cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" ) // AddonManager is the interface to initialize a manager on hub to manage the addon @@ -62,6 +64,7 @@ type addonManager struct { addonAgents map[string]agent.AgentAddon addonConfigs map[schema.GroupVersionResource]bool config *rest.Config + options *ManagerOptions syncContexts []factory.SyncContext } @@ -356,10 +359,40 @@ func (a *addonManager) StartWithInformers(ctx context.Context, return nil } +// ManagerOptions defines the flags for addon manager +type ManagerOptions struct { + WorkDriver string + WorkDriverConfig string + + CloudEventsClientID string + SourceID string +} + +// NewManagerOptions returns the flags with default value set +func NewManagerOptions() *ManagerOptions { + return &ManagerOptions{ + // set default work driver to kube + WorkDriver: cloudeventswork.ConfigTypeKube, + } +} + +func (o *ManagerOptions) AddFlags(cmd *cobra.Command) { + flags := cmd.Flags() + flags.StringVar(&o.WorkDriver, "work-driver", + o.WorkDriver, "The type of work driver, currently it can be kube, mqtt or grpc") + flags.StringVar(&o.WorkDriverConfig, "work-driver-config", + o.WorkDriverConfig, "The config file path of current work driver") + flags.StringVar(&o.CloudEventsClientID, "cloudevents-client-id", + o.CloudEventsClientID, "The ID of the cloudevents client when publishing works with cloudevents") + flags.StringVar(&o.SourceID, "source-id", + o.SourceID, "The ID of the source when publishing works with cloudevents") +} + // New returns a new Manager for creating addon agents. 
-func New(config *rest.Config) (AddonManager, error) { +func New(config *rest.Config, opts *ManagerOptions) (AddonManager, error) { return &addonManager{ config: config, + options: opts, syncContexts: []factory.SyncContext{}, addonConfigs: map[schema.GroupVersionResource]bool{}, addonAgents: map[string]agent.AgentAddon{}, diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go index 0181cf1ea..ce3942ff6 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -114,7 +114,7 @@ var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) { mgrContext, cancel = context.WithCancel(context.TODO()) // start hub controller go func() { - addonManager, err = addonmanager.New(cfg) + addonManager, err = addonmanager.New(cfg, addonmanager.NewManagerOptions()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = addonManager.AddAgent(testAddonImpl) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go new file mode 100644 index 000000000..8dd938545 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go @@ -0,0 +1,119 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/eclipse/paho.golang/paho" +) + +const ( + prefix = "ce-" + contentType = "Content-Type" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a MQTT message. 
+// This message *can* be read several times safely +type Message struct { + internal *paho.Publish + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *paho.Publish) *Message { + var f format.Format + var v spec.Version + if msg.Properties != nil { + // Use properties.User["Content-type"] to determine if message is structured + if s := msg.Properties.User.Get(contentType); format.IsFormat(s) { + f = format.Lookup(s) + } else if s := msg.Properties.User.Get(specs.PrefixedSpecVersionName()); s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.version != nil { + return binding.ErrNotStructured + } + if m.format == nil { + return binding.ErrNotStructured + } + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Payload)) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.format != nil { + return binding.ErrNotBinary + } + + for _, userProperty := range m.internal.Properties.User { + if strings.HasPrefix(userProperty.Key, prefix) { + attr := m.version.Attribute(userProperty.Key) + if attr != nil { + err = encoder.SetAttribute(attr, userProperty.Value) + } else { + err = encoder.SetExtension(strings.TrimPrefix(userProperty.Key, prefix), userProperty.Value) + } + } else if userProperty.Key == contentType { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(userProperty.Value)) + } + if err != nil { + return + } + } + + if m.internal.Payload != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.Payload)) + } + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + return attr, m.internal.Properties.User.Get(prefix + attr.Name()) + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Properties.User.Get(prefix + name) +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go new file mode 100644 index 000000000..955a16219 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go @@ -0,0 +1,48 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "fmt" + + "github.com/eclipse/paho.golang/paho" +) + +// Option is the function signature required to be considered an mqtt_paho.Option. +type Option func(*Protocol) error + +// WithConnect sets the paho.Connect configuration for the client. This option is not required. +func WithConnect(connOpt *paho.Connect) Option { + return func(p *Protocol) error { + if connOpt == nil { + return fmt.Errorf("the paho.Connect option must not be nil") + } + p.connOption = connOpt + return nil + } +} + +// WithPublish sets the paho.Publish configuration for the client. 
This option is required if you want to send messages. +func WithPublish(publishOpt *paho.Publish) Option { + return func(p *Protocol) error { + if publishOpt == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + p.publishOption = publishOpt + return nil + } +} + +// WithSubscribe sets the paho.Subscribe configuration for the client. This option is required if you want to receive messages. +func WithSubscribe(subscribeOpt *paho.Subscribe) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go new file mode 100644 index 000000000..261fc6c37 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go @@ -0,0 +1,155 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "context" + "fmt" + "io" + "sync" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/eclipse/paho.golang/paho" + + cecontext "github.com/cloudevents/sdk-go/v2/context" +) + +type Protocol struct { + client *paho.Client + config *paho.ClientConfig + connOption *paho.Connect + publishOption *paho.Publish + subscribeOption *paho.Subscribe + + // receiver + incoming chan *paho.Publish + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +func New(ctx context.Context, config *paho.ClientConfig, opts ...Option) (*Protocol, error) { + if config == nil { + return nil, fmt.Errorf("the paho.ClientConfig must not be nil") + } + + p := &Protocol{ + client: paho.NewClient(*config), + // default connect option + connOption: &paho.Connect{ + KeepAlive: 30, + CleanStart: true, + }, + incoming: make(chan *paho.Publish), + closeChan: make(chan struct{}), + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + // Connect to the MQTT broker + connAck, err := p.client.Connect(ctx, p.connOption) + if err != nil { + return nil, err + } + if connAck.ReasonCode != 0 { + return nil, fmt.Errorf("failed to connect to %q : %d - %q", p.client.Conn.RemoteAddr(), connAck.ReasonCode, + connAck.Properties.ReasonString) + } + + return p, nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if p.publishOption == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + + var err error + defer m.Finish(err) + + msg := p.publishOption + if cecontext.TopicFrom(ctx) != "" { + msg.Topic = cecontext.TopicFrom(ctx) + cecontext.WithTopic(ctx, "") + } + + err = WritePubMessage(ctx, m, msg, transformers...) 
+ if err != nil { + return err + } + + _, err = p.client.Publish(ctx, msg) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + + p.client.Router = paho.NewSingleHandlerRouter(func(m *paho.Publish) { + p.incoming <- m + }) + + logger.Infof("subscribing to topics: %v", p.subscribeOption.Subscriptions) + _, err := p.client.Subscribe(ctx, p.subscribeOption) + if err != nil { + return err + } + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + return p.client.Disconnect(&paho.Disconnect{ReasonCode: 0}) +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go new file mode 100644 index 000000000..a4b87f4aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go @@ -0,0 +1,133 @@ +/* +Copyright 2023 The CloudEvents Authors +SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "github.com/eclipse/paho.golang/paho" +) + +// WritePubMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WritePubMessage(ctx context.Context, m binding.Message, pubMessage *paho.Publish, transformers ...binding.Transformer) error { + structuredWriter := (*pubMessageWriter)(pubMessage) + binaryWriter := (*pubMessageWriter)(pubMessage) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pubMessageWriter paho.Publish + +var ( + _ binding.StructuredWriter = (*pubMessageWriter)(nil) + _ binding.BinaryWriter = (*pubMessageWriter)(nil) +) + +func (b *pubMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{ + User: make([]paho.UserProperty, 0), + } + } + b.Properties.User.Add(contentType, f.MediaType()) + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) Start(ctx context.Context) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{} + } + // the UserProperties of publish message is used to load event extensions + b.Properties.User = make([]paho.UserProperty, 0) + return nil +} + +func (b *pubMessageWriter) End(ctx context.Context) error { + return nil +} + +func (b *pubMessageWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + if attribute.Kind() == spec.DataContentType { + if value == nil { + b.removeProperty(contentType) + } + s, err := types.Format(value) + if err != nil { + return err + } + if err := b.addProperty(contentType, s); err != nil { + return err + } + } else { + if value == nil { + b.removeProperty(prefix + attribute.Name()) + } + return b.addProperty(prefix+attribute.Name(), value) + } + return nil +} + +func (b *pubMessageWriter) SetExtension(name string, value interface{}) error { + if value == nil { + b.removeProperty(prefix + name) + } + return b.addProperty(prefix+name, value) +} + +func (b *pubMessageWriter) removeProperty(key string) { + for i, v := range b.Properties.User { + if v.Key == key { + b.Properties.User = append(b.Properties.User[:i], b.Properties.User[i+1:]...) + break + } + } +} + +func (b *pubMessageWriter) addProperty(key string, value interface{}) error { + s, err := types.Format(value) + if err != nil { + return err + } + + b.Properties.User = append(b.Properties.User, paho.UserProperty{ + Key: key, + Value: s, + }) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
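
Editor's note (not part of the patch): to make the new flag surface easier to review, below is a minimal, hypothetical sketch of how a hub controller command could wire the ManagerOptions added in pkg/addonmanager/manager.go, i.e. NewManagerOptions, AddFlags, and the updated New(config, opts) signature shown earlier in this change. The command name, the in-cluster rest.Config lookup, and the surrounding main function are illustrative assumptions, not code taken from this patch.

package main

import (
	"context"

	"github.com/spf13/cobra"
	"k8s.io/client-go/rest"

	"open-cluster-management.io/addon-framework/pkg/addonmanager"
)

// newManagerCommand is a hypothetical hub controller command that exposes the
// work driver flags introduced by this patch.
func newManagerCommand() *cobra.Command {
	// Defaults WorkDriver to the kube driver; --work-driver, --work-driver-config,
	// --cloudevents-client-id and --source-id can override the defaults.
	opts := addonmanager.NewManagerOptions()

	cmd := &cobra.Command{
		Use:   "manager",
		Short: "Start the addon manager on the hub cluster",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Assumes the controller runs in-cluster; any other way of building
			// a *rest.Config works equally well here.
			kubeConfig, err := rest.InClusterConfig()
			if err != nil {
				return err
			}
			mgr, err := addonmanager.New(kubeConfig, opts)
			if err != nil {
				return err
			}
			// Register addon agents with mgr.AddAgent(...) here, as before.
			ctx := context.Background()
			if err := mgr.Start(ctx); err != nil {
				return err
			}
			// Start launches the controllers and returns; block until shutdown.
			<-ctx.Done()
			return nil
		},
	}

	// Registers --work-driver, --work-driver-config, --cloudevents-client-id
	// and --source-id on the command's flag set.
	opts.AddFlags(cmd)
	return cmd
}

func main() {
	if err := newManagerCommand().Execute(); err != nil {
		panic(err)
	}
}
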
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go new file mode 100644 index 000000000..2fbfaa9a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -0,0 +1,187 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +// Package v2 reexports a subset of the SDK v2 API. +package v2 + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. + +import ( + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/client" + "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Client + +type ClientOption = client.Option +type Client = client.Client + +// Event + +type Event = event.Event +type Result = protocol.Result + +// Context + +type EventContext = event.EventContext +type EventContextV1 = event.EventContextV1 +type EventContextV03 = event.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URIRef = types.URIRef + +// HTTP Protocol + +type HTTPOption = http.Option + +type HTTPProtocol = http.Protocol + +// Encoding + +type Encoding = binding.Encoding + +// Message + +type Message = binding.Message + +const ( + // ReadEncoding + + ApplicationXML = event.ApplicationXML + ApplicationJSON = event.ApplicationJSON + TextPlain = event.TextPlain + ApplicationCloudEventsJSON = event.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = event.ApplicationCloudEventsBatchJSON + Base64 = event.Base64 + + // Event Versions + + VersionV1 = event.CloudEventsVersionV1 + VersionV03 = event.CloudEventsVersionV03 + + // Encoding + + EncodingBinary = binding.EncodingBinary + EncodingStructured = binding.EncodingStructured +) + +var ( + + // ContentType Helpers + + StringOfApplicationJSON = event.StringOfApplicationJSON + StringOfApplicationXML = event.StringOfApplicationXML + StringOfTextPlain = event.StringOfTextPlain + StringOfApplicationCloudEventsJSON = event.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = event.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = event.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewClientHTTP = client.NewHTTP + // Deprecated: please use New with the observability options. + NewClientObserved = client.NewObserved + // Deprecated: Please use NewClientHTTP with the observability options. + NewDefaultClient = client.NewDefault + NewHTTPReceiveHandler = client.NewHTTPReceiveHandler + + // Client Options + + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + // Deprecated: this is now noop and will be removed in future releases. 
+ WithTracePropagation = client.WithTracePropagation() + + // Event Creation + + NewEvent = event.New + + // Results + + NewResult = protocol.NewResult + ResultIs = protocol.ResultIs + ResultAs = protocol.ResultAs + + // Receipt helpers + + NewReceipt = protocol.NewReceipt + + ResultACK = protocol.ResultACK + ResultNACK = protocol.ResultNACK + + IsACK = protocol.IsACK + IsNACK = protocol.IsNACK + IsUndelivered = protocol.IsUndelivered + + // HTTP Results + + NewHTTPResult = http.NewResult + NewHTTPRetriesResult = http.NewRetriesResult + + // Message Creation + + ToMessage = binding.ToMessage + + // Event Creation + + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch + + // HTTP Messages + + WriteHTTPRequest = http.WriteRequest + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + ContextWithRetriesConstantBackoff = context.WithRetriesConstantBackoff + ContextWithRetriesLinearBackoff = context.WithRetriesLinearBackoff + ContextWithRetriesExponentialBackoff = context.WithRetriesExponentialBackoff + + WithEncodingBinary = binding.WithForceBinary + WithEncodingStructured = binding.WithForceStructured + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURIRef = types.ParseURIRef + ParseURI = types.ParseURI + + // HTTP Protocol + + NewHTTP = http.New + + // HTTP Protocol Options + + WithTarget = http.WithTarget + WithHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + //WithEncoding = http.WithEncoding + //WithStructuredEncoding = http.WithStructuredEncoding // TODO: expose new way + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithListener = http.WithListener + WithRoundTripper = http.WithRoundTripper + WithGetHandlerFunc = http.WithGetHandlerFunc + WithOptionsHandlerFunc = http.WithOptionsHandlerFunc + WithDefaultOptionsHandlerFunc = http.WithDefaultOptionsHandlerFunc +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go new file mode 100644 index 000000000..97f2c4dd7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -0,0 +1,52 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageMetadataWriter is used to set metadata when a binary Message is visited. +type MessageMetadataWriter interface { + // Set a standard attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the attribute should be deleted. + // See package types to perform the needed conversions. + SetAttribute(attribute spec.Attribute, value interface{}) error + + // Set an extension attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the extension should be deleted. + // See package types to perform the needed conversions. 
+ SetExtension(name string, value interface{}) error +} + +// BinaryWriter is used to visit a binary Message and generate a new representation. +// +// Protocols that supports binary encoding should implement this interface to implement direct +// binary to binary encoding and event to binary encoding. +// +// Start() and End() methods must be invoked by the caller of Message.ReadBinary() every time +// the BinaryWriter implementation is used to visit a Message. +type BinaryWriter interface { + MessageMetadataWriter + + // Method invoked at the beginning of the visit. Useful to perform initial memory allocations + Start(ctx context.Context) error + + // SetData receives an io.Reader for the data attribute. + // io.Reader is not invoked when the data attribute is empty + SetData(data io.Reader) error + + // End method is invoked only after the whole encoding process ends successfully. + // If it fails, it's never invoked. It can be used to finalize the message. + End(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go new file mode 100644 index 000000000..8fa999789 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -0,0 +1,68 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* + +Package binding defines interfaces for protocol bindings. + +NOTE: Most applications that emit or consume events should use the ../client +package, which provides a simpler API to the underlying binding. + +The interfaces in this package provide extra encoding and protocol information +to allow efficient forwarding and end-to-end reliable delivery between a +Receiver and a Sender belonging to different bindings. This is useful for +intermediary applications that route or forward events, but not necessary for +most "endpoint" applications that emit or consume events. + +Protocol Bindings + +A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method. + +Read and write events + +The core of this package is the binding.Message interface. +Through binding.MessageReader It defines how to read a protocol specific message for an +encoded event in structured mode or binary mode. +The entity who receives a protocol specific data structure representing a message +(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage). +Then the entity that wants to send the binding.Message back on the wire, +translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using +the writers BinaryWriter and StructuredWriter specific to that protocol. +Binding implementations exposes their writers +through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage), +in order to simplify the encoding process. + +The encoding process can be customized in order to mutate the final result with binding.TransformerFactory. +A bunch of these are provided directly by the binding/transformer module. + +Usually binding.Message implementations can be encoded only one time, because the encoding process drain the message itself. +In order to consume a message several times, the binding/buffering package provides several APIs to buffer the Message. + +A message can be converted to an event.Event using binding.ToEvent() method. 
+An event.Event can be used as Message casting it to binding.EventMessage. + +In order to simplify the encoding process for each protocol, this package provide several utility methods like binding.Write and binding.DirectWrite. +The binding.Write method tries to preserve the structured/binary encoding, in order to be as much efficient as possible. + +Messages can be eventually wrapped to change their behaviours and binding their lifecycle, like the binding.FinishMessage. +Every Message wrapper implements the MessageWrapper interface + +Sender and Receiver + +A Receiver receives protocol specific messages and wraps them to into binding.Message implementations. + +A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method +and sends them. + +Message and ExactlyOnceMessage provide methods to allow acknowledgments to +propagate when a reliable messages is forwarded from a Receiver to a Sender. +QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported. + +Transport + +A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter. + +*/ +package binding diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go new file mode 100644 index 000000000..5070b7295 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "errors" + +// Encoding enum specifies the type of encodings supported by binding interfaces +type Encoding int + +const ( + // Binary encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingBinary Encoding = iota + // Structured encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingStructured + // Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper) + EncodingEvent + // When the encoding is unknown (which means that the message is a non-event) + EncodingUnknown + + // EncodingBatch is an instance of JSON Batched Events + EncodingBatch +) + +func (e Encoding) String() string { + switch e { + case EncodingBinary: + return "binary" + case EncodingStructured: + return "structured" + case EncodingEvent: + return "event" + case EncodingBatch: + return "batch" + case EncodingUnknown: + return "unknown" + } + return "" +} + +// ErrUnknownEncoding specifies that the Message is not an event or it is encoded with an unknown encoding +var ErrUnknownEncoding = errors.New("unknown Message encoding") + +// ErrNotStructured returned by Message.Structured for non-structured messages. +var ErrNotStructured = errors.New("message is not in structured mode") + +// ErrNotBinary returned by Message.Binary for non-binary messages. 
+var ErrNotBinary = errors.New("message is not in binary mode") diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go new file mode 100644 index 000000000..f82c729c4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -0,0 +1,108 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventFormatKey int + +const ( + formatEventStructured eventFormatKey = iota +) + +// EventMessage type-converts a event.Event object to implement Message. +// This allows local event.Event objects to be sent directly via Sender.Send() +// s.Send(ctx, binding.EventMessage(e)) +// When an event is wrapped into a EventMessage, the original event could be +// potentially mutated. If you need to use the Event again, after wrapping it into +// an Event message, you should copy it before +type EventMessage event.Event + +func ToMessage(e *event.Event) Message { + return (*EventMessage)(e) +} + +func (m *EventMessage) ReadEncoding() Encoding { + return EncodingEvent +} + +func (m *EventMessage) ReadStructured(ctx context.Context, builder StructuredWriter) error { + f := GetOrDefaultFromCtx(ctx, formatEventStructured, format.JSON).(format.Format) + b, err := f.Marshal((*event.Event)(m)) + if err != nil { + return err + } + return builder.SetStructuredEvent(ctx, f, bytes.NewReader(b)) +} + +func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err error) { + err = eventContextToBinaryWriter(m.Context, b) + if err != nil { + return err + } + // Pass the body + body := (*event.Event)(m).Data() + if len(body) > 0 { + err = b.SetData(bytes.NewBuffer(body)) + if err != nil { + return err + } + } + return nil +} + +func (m *EventMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + sv := spec.VS.Version(m.Context.GetSpecVersion()) + a := sv.AttributeFromKind(k) + if a != nil { + return a, a.Get(m.Context) + } + return nil, nil +} + +func (m *EventMessage) GetExtension(name string) interface{} { + ext, _ := m.Context.GetExtension(name) + return ext +} + +func eventContextToBinaryWriter(c event.EventContext, b BinaryWriter) (err error) { + // Pass all attributes + sv := spec.VS.Version(c.GetSpecVersion()) + for _, a := range sv.Attributes() { + value := a.Get(c) + if value != nil { + err = b.SetAttribute(a, value) + } + if err != nil { + return err + } + } + // Pass all extensions + for k, v := range c.GetExtensions() { + err = b.SetExtension(k, v) + if err != nil { + return err + } + } + return nil +} + +func (*EventMessage) Finish(error) error { return nil } + +var _ Message = (*EventMessage)(nil) // Test it conforms to the interface +var _ MessageMetadataReader = (*EventMessage)(nil) // Test it conforms to the interface + +// UseFormatForEvent configures which format to use when marshalling the event to structured mode +func UseFormatForEvent(ctx context.Context, f format.Format) context.Context { + return context.WithValue(ctx, formatEventStructured, f) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go new file mode 100644 index 000000000..8b51c4c61 --- /dev/null +++ 
b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "github.com/cloudevents/sdk-go/v2/binding/spec" + +type finishMessage struct { + Message + finish func(error) +} + +func (m *finishMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + return m.Message.(MessageMetadataReader).GetAttribute(k) +} + +func (m *finishMessage) GetExtension(s string) interface{} { + return m.Message.(MessageMetadataReader).GetExtension(s) +} + +func (m *finishMessage) GetWrappedMessage() Message { + return m.Message +} + +func (m *finishMessage) Finish(err error) error { + err2 := m.Message.Finish(err) // Finish original message first + if m.finish != nil { + m.finish(err) // Notify callback + } + return err2 +} + +var _ MessageWrapper = (*finishMessage)(nil) + +// WithFinish returns a wrapper for m that calls finish() and +// m.Finish() in its Finish(). +// Allows code to be notified when a message is Finished. +func WithFinish(m Message, finish func(error)) Message { + return &finishMessage{Message: m, finish: finish} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go new file mode 100644 index 000000000..54c3f1a8c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package format formats structured events. + +The "application/cloudevents+json" format is built-in and always +available. Other formats may be added. +*/ +package format diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go new file mode 100644 index 000000000..6bdd1842b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -0,0 +1,105 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package format + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Format marshals and unmarshals structured events to bytes. +type Format interface { + // MediaType identifies the format + MediaType() string + // Marshal event to bytes + Marshal(*event.Event) ([]byte, error) + // Unmarshal bytes to event + Unmarshal([]byte, *event.Event) error +} + +// Prefix for event-format media types. +const Prefix = "application/cloudevents" + +// IsFormat returns true if mediaType begins with "application/cloudevents" +func IsFormat(mediaType string) bool { return strings.HasPrefix(mediaType, Prefix) } + +// JSON is the built-in "application/cloudevents+json" format. +var JSON = jsonFmt{} + +type jsonFmt struct{} + +func (jsonFmt) MediaType() string { return event.ApplicationCloudEventsJSON } + +func (jsonFmt) Marshal(e *event.Event) ([]byte, error) { return json.Marshal(e) } +func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { + return json.Unmarshal(b, e) +} + +// JSONBatch is the built-in "application/cloudevents-batch+json" format. 
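// ---------------------------------------------------------------------------
// Illustrative sketch (editorial example, not part of the vendored sources):
// the Format interface and the built-in JSON format above can be used directly
// to marshal an event into its structured representation and back. The event
// attribute values below are made up for illustration.
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding/format"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New()
	e.SetID("42")
	e.SetType("io.example.sample")
	e.SetSource("example/source")

	// Marshal using the built-in "application/cloudevents+json" format.
	payload, err := format.JSON.Marshal(&e)
	if err != nil {
		panic(err)
	}
	fmt.Println(format.IsFormat(format.JSON.MediaType())) // true
	fmt.Println(string(payload))

	// Unmarshal the structured representation back into an event.
	var out event.Event
	if err := format.JSON.Unmarshal(payload, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID()) // "42"
}
// ---------------------------------------------------------------------------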
+var JSONBatch = jsonBatchFmt{} + +type jsonBatchFmt struct{} + +func (jb jsonBatchFmt) MediaType() string { + return event.ApplicationCloudEventsBatchJSON +} + +// Marshal will return an error for jsonBatchFmt since the Format interface doesn't support batch Marshalling, and we +// know it's structured batch json, we'll go direct to the json.UnMarshall() (see `ToEvents()`) since that is the best +// way to support batch operations for now. +func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) { + return nil, errors.New("not supported for batch events") +} + +func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error { + return errors.New("not supported for batch events") +} + +// built-in formats +var formats map[string]Format + +func init() { + formats = map[string]Format{} + Add(JSON) + Add(JSONBatch) +} + +// Lookup returns the format for contentType, or nil if not found. +func Lookup(contentType string) Format { + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + contentType = strings.TrimSpace(strings.ToLower(contentType[0:i])) + return formats[contentType] +} + +func unknown(mediaType string) error { + return fmt.Errorf("unknown event format media-type %#v", mediaType) +} + +// Add a new Format. It can be retrieved by Lookup(f.MediaType()) +func Add(f Format) { formats[f.MediaType()] = f } + +// Marshal an event to bytes using the mediaType event format. +func Marshal(mediaType string, e *event.Event) ([]byte, error) { + if f := formats[mediaType]; f != nil { + return f.Marshal(e) + } + return nil, unknown(mediaType) +} + +// Unmarshal bytes to an event using the mediaType event format. +func Unmarshal(mediaType string, b []byte, e *event.Event) error { + if f := formats[mediaType]; f != nil { + return f.Unmarshal(b, e) + } + return unknown(mediaType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go new file mode 100644 index 000000000..e30e150c0 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageReader defines the read-related portion of the Message interface. +// +// The ReadStructured and ReadBinary methods allows to perform an optimized encoding of a Message to a specific data structure. +// +// If MessageReader.ReadEncoding() can be equal to EncodingBinary, then the implementation of MessageReader +// MUST also implement MessageMetadataReader. +// +// A Sender should try each method of interest and fall back to binding.ToEvent() if none are supported. +// An out of the box algorithm is provided for writing a message: binding.Write(). +type MessageReader interface { + // Return the type of the message Encoding. + // The encoding should be preferably computed when the message is constructed. + ReadEncoding() Encoding + + // ReadStructured transfers a structured-mode event to a StructuredWriter. + // It must return ErrNotStructured if message is not in structured mode. + // + // Returns a different err if something wrong happened while trying to read the structured event. + // In this case, the caller must Finish the message with appropriate error. + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable structured form. 
+ ReadStructured(context.Context, StructuredWriter) error + + // ReadBinary transfers a binary-mode event to an BinaryWriter. + // It must return ErrNotBinary if message is not in binary mode. + // + // The implementation of ReadBinary must not control the lifecycle with BinaryWriter.Start() and BinaryWriter.End(), + // because the caller must control the lifecycle. + // + // Returns a different err if something wrong happened while trying to read the binary event + // In this case, the caller must Finish the message with appropriate error + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable binary form. + ReadBinary(context.Context, BinaryWriter) error +} + +// MessageMetadataReader defines how to read metadata from a binary/event message +// +// If a message implementing MessageReader is encoded as binary (MessageReader.ReadEncoding() == EncodingBinary) +// or it's an EventMessage, then it's safe to assume that it also implements this interface +type MessageMetadataReader interface { + // GetAttribute returns: + // + // * attribute, value: if the message contains an attribute of that attribute kind + // * attribute, nil: if the message spec version supports the attribute kind, but doesn't have any value + // * nil, nil: if the message spec version doesn't support the attribute kind + GetAttribute(attributeKind spec.Kind) (spec.Attribute, interface{}) + // GetExtension returns the value of that extension, if any. + GetExtension(name string) interface{} +} + +// Message is the interface to a binding-specific message containing an event. +// +// Reliable Delivery +// +// There are 3 reliable qualities of service for messages: +// +// 0/at-most-once/unreliable: messages can be dropped silently. +// +// 1/at-least-once: messages are not dropped without signaling an error +// to the sender, but they may be duplicated in the event of a re-send. +// +// 2/exactly-once: messages are never dropped (without error) or +// duplicated, as long as both sending and receiving ends maintain +// some binding-specific delivery state. Whether this is persisted +// depends on the configuration of the binding implementations. +// +// The Message interface supports QoS 0 and 1, the ExactlyOnceMessage interface +// supports QoS 2 +// +// Message includes the MessageReader interface to read messages. Every binding.Message implementation *must* specify if the message can be accessed one or more times. +// +// When a Message can be forgotten by the entity who produced the message, Message.Finish() *must* be invoked. +type Message interface { + MessageReader + + // Finish *must* be called when message from a Receiver can be forgotten by + // the receiver. A QoS 1 sender should not call Finish() until it gets an acknowledgment of + // receipt on the underlying transport. For QoS 2 see ExactlyOnceMessage. + // + // Note that, depending on the Message implementation, forgetting to Finish the message + // could produce memory/resources leaks! + // + // Passing a non-nil err indicates sending or processing failed. + // A non-nil return indicates that the message was not accepted + // by the receivers peer. + Finish(error) error +} + +// ExactlyOnceMessage is implemented by received Messages +// that support QoS 2. Only transports that support QoS 2 need to +// implement or use this interface. +type ExactlyOnceMessage interface { + Message + + // Received is called by a forwarding QoS2 Sender when it gets + // acknowledgment of receipt (e.g. 
AMQP 'accept' or MQTT PUBREC) + // + // The receiver must call settle(nil) when it get's the ack-of-ack + // (e.g. AMQP 'settle' or MQTT PUBCOMP) or settle(err) if the + // transfer fails. + // + // Finally the Sender calls Finish() to indicate the message can be + // discarded. + // + // If sending fails, or if the sender does not support QoS 2, then + // Finish() may be called without any call to Received() + Received(settle func(error)) +} + +// MessageContext interface exposes the internal context that a message might contain +// Only some Message implementations implement this interface. +type MessageContext interface { + // Get the context associated with this message + Context() context.Context +} + +// MessageWrapper interface is used to walk through a decorated Message and unwrap it. +type MessageWrapper interface { + Message + MessageMetadataReader + + // Method to get the wrapped message + GetWrappedMessage() Message +} + +func UnwrapMessage(message Message) Message { + m := message + for m != nil { + switch mt := m.(type) { + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + return m + } + } + return m +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go new file mode 100644 index 000000000..3c3021d46 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Kind is a version-independent identifier for a CloudEvent context attribute. +type Kind uint8 + +const ( + // Required cloudevents attributes + ID Kind = iota + Source + SpecVersion + Type + // Optional cloudevents attributes + DataContentType + DataSchema + Subject + Time +) +const nAttrs = int(Time) + 1 + +var kindNames = [nAttrs]string{ + "id", + "source", + "specversion", + "type", + "datacontenttype", + "dataschema", + "subject", + "time", +} + +// String is a human-readable string, for a valid attribute name use Attribute.Name +func (k Kind) String() string { return kindNames[k] } + +// IsRequired returns true for attributes defined as "required" by the CE spec. +func (k Kind) IsRequired() bool { return k < DataContentType } + +// Attribute is a named attribute accessor. +// The attribute name is specific to a Version. +type Attribute interface { + Kind() Kind + // Name of the attribute with respect to the current spec Version() with prefix + PrefixedName() string + // Name of the attribute with respect to the current spec Version() + Name() string + // Version of the spec that this attribute belongs to + Version() Version + // Get the value of this attribute from an event context + Get(event.EventContextReader) interface{} + // Set the value of this attribute on an event context + Set(event.EventContextWriter, interface{}) error + // Delete this attribute from and event context, when possible + Delete(event.EventContextWriter) error +} + +// accessor provides Kind, Get, Set. 
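// ---------------------------------------------------------------------------
// Illustrative sketch (editorial example, not part of the vendored sources):
// the MessageWrapper and UnwrapMessage helpers above can be combined with
// WithFinish (binding/finish_message.go) to observe when a message is
// finished. The event values are made up for illustration.
package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New()
	e.SetID("example-1")
	e.SetType("io.example.sample")
	e.SetSource("example/source")

	// Wrap the message to be notified when a sender or receiver calls Finish().
	msg := binding.WithFinish(binding.ToMessage(&e), func(err error) {
		log.Printf("message finished, err=%v", err)
	})

	// UnwrapMessage walks back through the wrapper chain to the EventMessage.
	inner := binding.UnwrapMessage(msg)
	out, _ := binding.ToEvent(context.Background(), inner)
	log.Printf("unwrapped event id: %s", out.ID())

	_ = msg.Finish(nil) // triggers the callback registered above
}
// ---------------------------------------------------------------------------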
+type accessor interface { + Kind() Kind + Get(event.EventContextReader) interface{} + Set(event.EventContextWriter, interface{}) error + Delete(event.EventContextWriter) error +} + +var acc = [nAttrs]accessor{ + &aStr{aKind(ID), event.EventContextReader.GetID, event.EventContextWriter.SetID}, + &aStr{aKind(Source), event.EventContextReader.GetSource, event.EventContextWriter.SetSource}, + &aStr{aKind(SpecVersion), event.EventContextReader.GetSpecVersion, func(writer event.EventContextWriter, s string) error { return nil }}, + &aStr{aKind(Type), event.EventContextReader.GetType, event.EventContextWriter.SetType}, + &aStr{aKind(DataContentType), event.EventContextReader.GetDataContentType, event.EventContextWriter.SetDataContentType}, + &aStr{aKind(DataSchema), event.EventContextReader.GetDataSchema, event.EventContextWriter.SetDataSchema}, + &aStr{aKind(Subject), event.EventContextReader.GetSubject, event.EventContextWriter.SetSubject}, + &aTime{aKind(Time), event.EventContextReader.GetTime, event.EventContextWriter.SetTime}, +} + +// aKind implements Kind() +type aKind Kind + +func (kind aKind) Kind() Kind { return Kind(kind) } + +type aStr struct { + aKind + get func(event.EventContextReader) string + set func(event.EventContextWriter, string) error +} + +func (a *aStr) Get(c event.EventContextReader) interface{} { + if s := a.get(c); s != "" { + return s + } + return nil // Treat blank as missing +} + +func (a *aStr) Set(c event.EventContextWriter, v interface{}) error { + s, err := types.ToString(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, s) +} + +func (a *aStr) Delete(c event.EventContextWriter) error { + return a.set(c, "") +} + +type aTime struct { + aKind + get func(event.EventContextReader) time.Time + set func(event.EventContextWriter, time.Time) error +} + +func (a *aTime) Get(c event.EventContextReader) interface{} { + if v := a.get(c); !v.IsZero() { + return v + } + return nil // Treat zero time as missing. +} + +func (a *aTime) Set(c event.EventContextWriter, v interface{}) error { + t, err := types.ToTime(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, t) +} + +func (a *aTime) Delete(c event.EventContextWriter) error { + return a.set(c, time.Time{}) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go new file mode 100644 index 000000000..44c0b3145 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -0,0 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package spec provides spec-version metadata. + +For use by code that maps events using (prefixed) attribute name strings. +Supports handling multiple spec versions uniformly. 
+ +*/ +package spec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go new file mode 100644 index 000000000..110787ddc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -0,0 +1,81 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "github.com/cloudevents/sdk-go/v2/event" +) + +type matchExactVersion struct { + version +} + +func (v *matchExactVersion) Attribute(name string) Attribute { return v.attrMap[name] } + +var _ Version = (*matchExactVersion)(nil) + +func newMatchExactVersionVersion( + prefix string, + attributeNameMatchMapper func(string) string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *matchExactVersion { + v := &matchExactVersion{ + version: version{ + prefix: prefix, + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + }, + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[attributeNameMatchMapper(a.name)] = a + } + return v +} + +// WithPrefixMatchExact returns a set of versions with prefix added to all attribute names. +func WithPrefixMatchExact(attributeNameMatchMapper func(string) string, prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go new file mode 100644 index 000000000..7fa0f5840 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -0,0 +1,189 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Version provides meta-data for a single spec-version. +type Version interface { + // String name of the version, e.g. "1.0" + String() string + // Prefix for attribute names. + Prefix() string + // Attribute looks up a prefixed attribute name (case insensitive). + // Returns nil if not found. + Attribute(prefixedName string) Attribute + // Attribute looks up the attribute from kind. + // Returns nil if not found. + AttributeFromKind(kind Kind) Attribute + // Attributes returns all the context attributes for this version. 
+ Attributes() []Attribute + // Convert translates a context to this version. + Convert(event.EventContextConverter) event.EventContext + // NewContext returns a new context for this version. + NewContext() event.EventContext + // SetAttribute sets named attribute to value. + // + // Name is case insensitive. + // Does nothing if name does not start with prefix. + SetAttribute(context event.EventContextWriter, name string, value interface{}) error +} + +// Versions contains all known versions with the same attribute prefix. +type Versions struct { + prefix string + all []Version + m map[string]Version +} + +// Versions returns the list of all known versions, most recent first. +func (vs *Versions) Versions() []Version { return vs.all } + +// Version returns the named version. +func (vs *Versions) Version(name string) Version { + return vs.m[name] +} + +// Latest returns the latest Version +func (vs *Versions) Latest() Version { return vs.all[0] } + +// PrefixedSpecVersionName returns the specversion attribute PrefixedName +func (vs *Versions) PrefixedSpecVersionName() string { return vs.prefix + "specversion" } + +// Prefix is the lowercase attribute name prefix. +func (vs *Versions) Prefix() string { return vs.prefix } + +type attribute struct { + accessor + name string + version Version +} + +func (a *attribute) PrefixedName() string { return a.version.Prefix() + a.name } +func (a *attribute) Name() string { return a.name } +func (a *attribute) Version() Version { return a.version } + +type version struct { + prefix string + context event.EventContext + convert func(event.EventContextConverter) event.EventContext + attrMap map[string]Attribute + attrs []Attribute +} + +func (v *version) Attribute(name string) Attribute { return v.attrMap[strings.ToLower(name)] } +func (v *version) Attributes() []Attribute { return v.attrs } +func (v *version) String() string { return v.context.GetSpecVersion() } +func (v *version) Prefix() string { return v.prefix } +func (v *version) NewContext() event.EventContext { return v.context.Clone() } + +// HasPrefix is a case-insensitive prefix check. +func (v *version) HasPrefix(name string) bool { + return strings.HasPrefix(strings.ToLower(name), v.prefix) +} + +func (v *version) Convert(c event.EventContextConverter) event.EventContext { return v.convert(c) } + +func (v *version) SetAttribute(c event.EventContextWriter, name string, value interface{}) error { + if a := v.Attribute(name); a != nil { // Standard attribute + return a.Set(c, value) + } + name = strings.ToLower(name) + var err error + if v.HasPrefix(name) { // Extension attribute + return c.SetExtension(strings.TrimPrefix(name, v.prefix), value) + } + return err +} + +func (v *version) AttributeFromKind(kind Kind) Attribute { + for _, a := range v.Attributes() { + if a.Kind() == kind { + return a + } + } + return nil +} + +func newVersion( + prefix string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *version { + v := &version{ + prefix: strings.ToLower(prefix), + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[strings.ToLower(a.PrefixedName())] = a + } + return v +} + +// WithPrefix returns a set of versions with prefix added to all attribute names. 
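// ---------------------------------------------------------------------------
// Illustrative sketch (editorial example, not part of the vendored sources):
// WithPrefix, defined just below, builds a version set whose attribute names
// carry a prefix; the "ce-" prefix here mirrors what a binary-mode transport
// binding might use and is only an illustration.
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding/spec"
)

func main() {
	vs := spec.WithPrefix("ce-")
	v := vs.Version("1.0")

	fmt.Println(vs.PrefixedSpecVersionName()) // "ce-specversion"
	fmt.Println(v.Attribute("CE-ID").Name())  // "id" (lookup is case-insensitive)

	// SetAttribute routes prefixed names to standard attributes or extensions.
	ec := v.NewContext()
	_ = v.SetAttribute(ec, "ce-subject", "orders")   // standard attribute
	_ = v.SetAttribute(ec, "ce-partitionkey", "abc") // unknown prefixed name becomes an extension
	fmt.Println(ec.GetSubject(), ec.GetExtensions())
}
// ---------------------------------------------------------------------------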
+func WithPrefix(prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newVersion(prefix, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newVersion(prefix, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} + +// New returns a set of versions +func New() *Versions { return WithPrefix("") } + +// Built-in un-prefixed versions. +var ( + VS *Versions + V03 Version + V1 Version +) + +func init() { + VS = New() + V03 = VS.Version("0.3") + V1 = VS.Version("1.0") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go new file mode 100644 index 000000000..60256f2b3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -0,0 +1,22 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" +) + +// StructuredWriter is used to visit a structured Message and generate a new representation. +// +// Protocols that supports structured encoding should implement this interface to implement direct +// structured to structured encoding and event to structured encoding. +type StructuredWriter interface { + // Event receives an io.Reader for the whole event. + SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go new file mode 100644 index 000000000..d3332c158 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/types" +) + +// ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails +var ErrCannotConvertToEvent = errors.New("cannot convert message to event") + +// ErrCannotConvertToEvents is a generic error when a conversion of a Message to a Batched Event fails +var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events") + +// ToEvent translates a Message with a valid Structured or Binary representation to an Event. +// This function returns the Event generated from the Message and the original encoding of the message or +// an error that points the conversion error. 
+// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +func ToEvent(ctx context.Context, message MessageReader, transformers ...Transformer) (*event.Event, error) { + if message == nil { + return nil, nil + } + + messageEncoding := message.ReadEncoding() + if messageEncoding == EncodingEvent { + m := message + for m != nil { + switch mt := m.(type) { + case *EventMessage: + e := (*event.Event)(mt) + return e, Transformers(transformers).Transform(mt, (*messageToEventBuilder)(e)) + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + break + } + } + return nil, ErrCannotConvertToEvent + } + + e := event.New() + encoder := (*messageToEventBuilder)(&e) + _, err := DirectWrite( + context.Background(), + message, + encoder, + encoder, + ) + if err != nil { + return nil, err + } + return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder) +} + +// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events. +// This function returns the Events generated from the body data, or an error that points +// to the conversion issue. +func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) { + messageEncoding := message.ReadEncoding() + if messageEncoding != EncodingBatch { + return nil, ErrCannotConvertToEvents + } + + // Since Format doesn't support batch Marshalling, and we know it's structured batch json, we'll go direct to the + // json.UnMarshall(), since that is the best way to support batch operations for now. + var events []event.Event + return events, json.NewDecoder(body).Decode(&events) +} + +type messageToEventBuilder event.Event + +var _ StructuredWriter = (*messageToEventBuilder)(nil) +var _ BinaryWriter = (*messageToEventBuilder)(nil) + +func (b *messageToEventBuilder) SetStructuredEvent(ctx context.Context, format format.Format, ev io.Reader) error { + var buf bytes.Buffer + _, err := io.Copy(&buf, ev) + if err != nil { + return err + } + return format.Unmarshal(buf.Bytes(), (*event.Event)(b)) +} + +func (b *messageToEventBuilder) Start(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) End(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) SetData(data io.Reader) error { + buf, ok := data.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, data) + if err != nil { + return err + } + } + if buf.Len() > 0 { + b.DataEncoded = buf.Bytes() + } + return nil +} + +func (b *messageToEventBuilder) SetAttribute(attribute spec.Attribute, value interface{}) error { + if value == nil { + _ = attribute.Delete(b.Context) + return nil + } + // If spec version we need to change to right context struct + if attribute.Kind() == spec.SpecVersion { + str, err := types.ToString(value) + if err != nil { + return err + } + switch str { + case event.CloudEventsVersionV03: + b.Context = b.Context.AsV03() + case event.CloudEventsVersionV1: + b.Context = b.Context.AsV1() + default: + return fmt.Errorf("unrecognized event version %s", str) + } + return nil + } + return attribute.Set(b.Context, value) +} + +func (b *messageToEventBuilder) SetExtension(name string, value interface{}) error { + if value == nil { + return b.Context.SetExtension(name, nil) + } + value, err := types.Validate(value) + if err != nil { + return err + } + return b.Context.SetExtension(name, value) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go 
b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go new file mode 100644 index 000000000..de3bec44f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +// Transformer is an interface that implements a transformation +// process while transferring the event from the Message +// implementation to the provided encoder +// +// When a write function (binding.Write, binding.ToEvent, buffering.CopyMessage, etc.) +// takes Transformer(s) as parameter, it eventually converts the message to a form +// which correctly implements MessageMetadataReader, in order to guarantee that transformation +// is applied +type Transformer interface { + Transform(MessageMetadataReader, MessageMetadataWriter) error +} + +// TransformerFunc is a type alias to implement a Transformer through a function pointer +type TransformerFunc func(MessageMetadataReader, MessageMetadataWriter) error + +func (t TransformerFunc) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + return t(r, w) +} + +var _ Transformer = (TransformerFunc)(nil) + +// Transformers is a utility alias to run several Transformer +type Transformers []Transformer + +func (t Transformers) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + for _, transformer := range t { + err := transformer.Transform(r, w) + if err != nil { + return err + } + } + return nil +} + +var _ Transformer = (Transformers)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go new file mode 100644 index 000000000..cb498e62d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -0,0 +1,179 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventEncodingKey int + +const ( + skipDirectStructuredEncoding eventEncodingKey = iota + skipDirectBinaryEncoding + preferredEventEncoding +) + +// DirectWrite invokes the encoders. structuredWriter and binaryWriter could be nil if the protocol doesn't support it. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. 
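// ---------------------------------------------------------------------------
// Illustrative sketch (editorial example, not part of the vendored sources):
// the Transformer interface above can be satisfied with a TransformerFunc.
// Here a hypothetical transformer stamps a "region" extension while the
// message is converted with ToEvent; the extension name and value are made up.
package main

import (
	"context"
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	addRegion := binding.TransformerFunc(func(r binding.MessageMetadataReader, w binding.MessageMetadataWriter) error {
		return w.SetExtension("region", "us-east-1")
	})

	e := event.New()
	e.SetID("example-1")
	e.SetType("io.example.sample")
	e.SetSource("example/source")

	// Transformers are guaranteed to run exactly once during the conversion.
	out, err := binding.ToEvent(context.Background(), binding.ToMessage(&e), addRegion)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Extensions()["region"]) // "us-east-1"
}
// ---------------------------------------------------------------------------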
+// This function MUST be invoked only if message.ReadEncoding() == EncodingBinary or message.ReadEncoding() == EncodingStructured +// +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingStructured, err if message was structured but error happened during the encoding +// * EncodingBinary, err if message was binary but error happened during the encoding +// * EncodingUnknown, ErrUnknownEncoding if message is not a structured or a binary Message +func DirectWrite( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + if structuredWriter != nil && len(transformers) == 0 && !GetOrDefaultFromCtx(ctx, skipDirectStructuredEncoding, false).(bool) { + if err := message.ReadStructured(ctx, structuredWriter); err == nil { + return EncodingStructured, nil + } else if err != ErrNotStructured { + return EncodingStructured, err + } + } + + if binaryWriter != nil && !GetOrDefaultFromCtx(ctx, skipDirectBinaryEncoding, false).(bool) && message.ReadEncoding() == EncodingBinary { + return EncodingBinary, writeBinaryWithTransformer(ctx, message, binaryWriter, transformers) + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// Write executes the full algorithm to encode a Message using transformers: +// 1. It first tries direct encoding using DirectWrite +// 2. If no direct encoding is possible, it uses ToEvent to generate an Event representation +// 3. From the Event, the message is encoded back to the provided structured or binary encoders +// You can tweak the encoding process using the context decorators WithForceStructured, WithForceStructured, etc. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingUnknown, ErrUnknownEncoding if message.ReadEncoding() == EncodingUnknown +// * _, err if error happened during the encoding +func Write( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + enc := message.ReadEncoding() + var err error + // Skip direct encoding if the event is an event message + if enc != EncodingEvent { + enc, err = DirectWrite(ctx, message, structuredWriter, binaryWriter, transformers...) + if enc != EncodingUnknown { + // Message directly encoded, nothing else to do here + return enc, err + } + } + + var e *event.Event + e, err = ToEvent(ctx, message, transformers...) 
+ if err != nil { + return enc, err + } + + message = (*EventMessage)(e) + + if GetOrDefaultFromCtx(ctx, preferredEventEncoding, EncodingBinary).(Encoding) == EncodingStructured { + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + } else { + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// WithSkipDirectStructuredEncoding skips direct structured to structured encoding during the encoding process +func WithSkipDirectStructuredEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectStructuredEncoding, skip) +} + +// WithSkipDirectBinaryEncoding skips direct binary to binary encoding during the encoding process +func WithSkipDirectBinaryEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectBinaryEncoding, skip) +} + +// WithPreferredEventEncoding defines the preferred encoding from event to message during the encoding process +func WithPreferredEventEncoding(ctx context.Context, enc Encoding) context.Context { + return context.WithValue(ctx, preferredEventEncoding, enc) +} + +// WithForceStructured forces structured encoding during the encoding process +func WithForceStructured(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingStructured), skipDirectBinaryEncoding, true) +} + +// WithForceBinary forces binary encoding during the encoding process +func WithForceBinary(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingBinary), skipDirectStructuredEncoding, true) +} + +// GetOrDefaultFromCtx gets a configuration value from the provided context +func GetOrDefaultFromCtx(ctx context.Context, key interface{}, def interface{}) interface{} { + if val := ctx.Value(key); val != nil { + return val + } else { + return def + } +} + +func writeBinaryWithTransformer( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, + transformers Transformers, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + err = transformers.Transform(message.(MessageMetadataReader), binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} + +func writeBinary( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go new file mode 100644 index 000000000..ea8fbfbb4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -0,0 +1,288 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext 
"github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. + Send(ctx context.Context, event event.Event) protocol.Result + + // Request will transmit the given event over the client's configured + // transport and return any response event. + Request(ctx context.Context, event event.Event) (*event.Event, protocol.Result) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying protocol as it has + // been configured. + // This call is blocking. + // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) protocol.Result + // * func(event.Event) + // * func(event.Event) protocol.Result + // * func(context.Context, event.Event) + // * func(context.Context, event.Event) protocol.Result + // * func(event.Event) *event.Event + // * func(event.Event) (*event.Event, protocol.Result) + // * func(context.Context, event.Event) *event.Event + // * func(context.Context, event.Event) (*event.Event, protocol.Result) + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. +func New(obj interface{}, opts ...Option) (Client, error) { + c := &ceClient{ + // Running runtime.GOMAXPROCS(0) doesn't update the value, just returns the current one + pollGoroutines: runtime.GOMAXPROCS(0), + observabilityService: noopObservabilityService{}, + } + + if p, ok := obj.(protocol.Sender); ok { + c.sender = p + } + if p, ok := obj.(protocol.Requester); ok { + c.requester = p + } + if p, ok := obj.(protocol.Responder); ok { + c.responder = p + } + if p, ok := obj.(protocol.Receiver); ok { + c.receiver = p + } + if p, ok := obj.(protocol.Opener); ok { + c.opener = p + } + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + sender protocol.Sender + requester protocol.Requester + receiver protocol.Receiver + responder protocol.Responder + // Optional. + opener protocol.Opener + + observabilityService ObservabilityService + + inboundContextDecorators []func(context.Context, binding.Message) context.Context + outboundContextDecorators []func(context.Context) context.Context + invoker Invoker + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter + pollGoroutines int + blockingCallback bool +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + var err error + if c.sender == nil { + err = errors.New("sender not set") + return err + } + + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + if err = e.Validate(); err != nil { + return err + } + + // Event has been defaulted and validated, record we are going to perform send. 
+ ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) + err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) + return err +} + +func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + var resp *event.Event + var err error + + if c.requester == nil { + err = errors.New("requester not set") + return nil, err + } + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err = e.Validate(); err != nil { + return nil, err + } + + // Event has been defaulted and validated, record we are going to perform request. + ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) + + // If provided a requester, use it to do request/response. + var msg binding.Message + msg, err = c.requester.Request(ctx, (*binding.EventMessage)(&e)) + if msg != nil { + defer func() { + if err := msg.Finish(err); err != nil { + cecontext.LoggerFrom(ctx).Warnw("failed calling message.Finish", zap.Error(err)) + } + }() + } + if protocol.IsUndelivered(err) { + return nil, err + } + + // try to turn msg into an event, it might not work and that is ok. + if rs, rserr := binding.ToEvent(ctx, msg); rserr != nil { + cecontext.LoggerFrom(ctx).Debugw("response: failed calling ToEvent", zap.Error(rserr), zap.Any("resp", msg)) + // If the protocol returns no error, it is an ACK on the request, but we had + // issues turning the response into an event, so make an ACK Result and pass + // down the ToEvent error as well. + err = protocol.NewReceipt(true, "failed to convert response into event: %v\n%w", rserr, err) + } else { + resp = rs + } + defer cb(err, resp) + return resp, err +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. +func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.invoker != nil { + return fmt.Errorf("client already has a receiver") + } + + invoker, err := newReceiveInvoker(fn, c.observabilityService, c.inboundContextDecorators, c.eventDefaulterFns...) + if err != nil { + return err + } + if invoker.IsReceiver() && c.receiver == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Receiver supported by protocol") + } + if invoker.IsResponder() && c.responder == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Responder supported by protocol") + } + c.invoker = invoker + + if c.responder == nil && c.receiver == nil { + return errors.New("responder nor receiver set") + } + + defer func() { + c.invoker = nil + }() + + // Start Polling. 
+ wg := sync.WaitGroup{} + for i := 0; i < c.pollGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + var msg binding.Message + var respFn protocol.ResponseFn + var err error + + if c.responder != nil { + msg, respFn, err = c.responder.Respond(ctx) + } else if c.receiver != nil { + msg, err = c.receiver.Receive(ctx) + respFn = noRespFn + } + + if err == io.EOF { // Normal close + return + } + + if err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while receiving a message: ", err) + continue + } + + callback := func() { + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) + } + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } + } + }() + } + + // Start the opener, if set. + if c.opener != nil { + if err = c.opener.OpenInbound(ctx); err != nil { + err = fmt.Errorf("error while opening the inbound connection: %w", err) + cancel() + } + } + + wg.Wait() + + return err +} + +// noRespFn is used to simply forward the protocol.Result for receivers that aren't responders +func noRespFn(_ context.Context, _ binding.Message, r protocol.Result, _ ...binding.Transformer) error { + return r +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go new file mode 100644 index 000000000..d48cc2042 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -0,0 +1,35 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewHTTP provides the good defaults for the common case using an HTTP +// Protocol client. +// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewHTTP(opts ...http.Option) (Client, error) { + p, err := http.New(opts...) + if err != nil { + return nil, err + } + + c, err := New(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewDefault has been replaced by NewHTTP +// Deprecated. To get the same as NewDefault provided, please use NewHTTP with +// the observability service passed as an option, or client.NewClientHTTP from +// package github.com/cloudevents/sdk-go/observability/opencensus/v2/client +var NewDefault = NewHTTP diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go new file mode 100644 index 000000000..82985b8a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +// NewObserved produces a new client with the provided transport object and applied +// client options. +// Deprecated: This now has the same behaviour of New, and will be removed in future releases. +// As New, you must provide the observability service to use. 
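// ---------------------------------------------------------------------------
// Illustrative sketch (editorial example, not part of the vendored sources):
// NewHTTP above wires an HTTP protocol into a Client with ID and time
// defaulting applied. The port, target URL and event attributes below are
// made-up values, and the sleep is only a crude stand-in for readiness.
package main

import (
	"context"
	"log"
	"time"

	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
	"github.com/cloudevents/sdk-go/v2/protocol"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	// Receiver: listen on :8080 and handle incoming events.
	recv, err := client.NewHTTP(cehttp.WithPort(8080))
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		log.Fatal(recv.StartReceiver(context.Background(), func(ctx context.Context, e event.Event) protocol.Result {
			log.Printf("received %s from %s", e.Type(), e.Source())
			return nil // a nil result is treated as an ACK
		}))
	}()
	time.Sleep(time.Second) // wait for the receiver in this sketch

	// Sender: NewHTTP already applies WithTimeNow and WithUUIDs, so the
	// missing ID and timestamp are defaulted before validation.
	send, err := client.NewHTTP(cehttp.WithTarget("http://localhost:8080/"))
	if err != nil {
		log.Fatal(err)
	}
	e := event.New()
	e.SetType("io.example.sample")
	e.SetSource("example/source")
	if result := send.Send(context.Background(), e); protocol.IsUndelivered(result) {
		log.Printf("failed to send: %v", result)
	}
}
// ---------------------------------------------------------------------------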
+var NewObserved = New diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go new file mode 100644 index 000000000..7bfebf35c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/google/uuid" +) + +// EventDefaulter is the function signature for extensions that are able +// to perform event defaulting. +type EventDefaulter func(ctx context.Context, event event.Event) event.Event + +// DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to +// context.ID if it is found to be empty. +func DefaultIDToUUIDIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.ID() == "" { + event.Context = event.Context.Clone() + event.SetID(uuid.New().String()) + } + } + return event +} + +// DefaultTimeToNowIfNotSet will inspect the provided event and assign a new +// Timestamp to context.Time if it is found to be nil or zero. +func DefaultTimeToNowIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.Time().IsZero() { + event.Context = event.Context.Clone() + event.SetTime(time.Now()) + } + } + return event +} + +// NewDefaultDataContentTypeIfNotSet returns a defaulter that will inspect the +// provided event and set the provided content type if content type is found +// to be empty. +func NewDefaultDataContentTypeIfNotSet(contentType string) EventDefaulter { + return func(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.DataContentType() == "" { + event.SetDataContentType(contentType) + } + } + return event + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go new file mode 100644 index 000000000..e09962ce6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -0,0 +1,11 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps +a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method +registration. For full details, read the `client.Client` documentation. +*/ +package client diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go new file mode 100644 index 000000000..94a4b4e65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + thttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "go.uber.org/zap" + "net/http" +) + +func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil) //TODO(slinkydeveloper) maybe not nil? 
+ if err != nil { + return nil, err + } + + return &EventReceiver{ + p: p, + invoker: invoker, + }, nil +} + +type EventReceiver struct { + p *thttp.Protocol + invoker Invoker +} + +func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // Prepare to handle the message if there's one (context cancellation will ensure this closes) + go func() { + ctx := req.Context() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Respond", zap.Error(err)) + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Invoke", zap.Error(err)) + } + }() + r.p.ServeHTTP(rw, req) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go new file mode 100644 index 000000000..403fb0f55 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -0,0 +1,137 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type Invoker interface { + Invoke(context.Context, binding.Message, protocol.ResponseFn) error + IsReceiver() bool + IsResponder() bool +} + +var _ Invoker = (*receiveInvoker)(nil) + +func newReceiveInvoker(fn interface{}, observabilityService ObservabilityService, inboundContextDecorators []func(context.Context, binding.Message) context.Context, fns ...EventDefaulter) (Invoker, error) { + r := &receiveInvoker{ + eventDefaulterFns: fns, + observabilityService: observabilityService, + inboundContextDecorators: inboundContextDecorators, + } + + if fn, err := receiver(fn); err != nil { + return nil, err + } else { + r.fn = fn + } + + return r, nil +} + +type receiveInvoker struct { + fn *receiverFn + observabilityService ObservabilityService + eventDefaulterFns []EventDefaulter + inboundContextDecorators []func(context.Context, binding.Message) context.Context +} + +func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { + defer func() { + err = m.Finish(err) + }() + + var respMsg binding.Message + var result protocol.Result + + e, eventErr := binding.ToEvent(ctx, m) + switch { + case eventErr != nil && r.fn.hasEventIn: + r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) + return respFn(ctx, nil, protocol.NewReceipt(false, "failed to convert Message to Event: %w", eventErr)) + case r.fn != nil: + // Check if event is valid before invoking the receiver function + if e != nil { + if validationErr := e.Validate(); validationErr != nil { + r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) + return respFn(ctx, nil, protocol.NewReceipt(false, "validation error in incoming event: %w", validationErr)) + } + } + + // Let's invoke the receiver fn + var resp *event.Event + resp, result = func() (resp *event.Event, result protocol.Result) { + defer func() { + if r := recover(); r != nil { + result = fmt.Errorf("call to Invoker.Invoke(...) 
has panicked: %v", r) + cecontext.LoggerFrom(ctx).Error(result) + } + }() + ctx = computeInboundContext(m, ctx, r.inboundContextDecorators) + + var cb func(error) + ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) + + resp, result = r.fn.invoke(ctx, e) + defer cb(result) + return + }() + + if respFn == nil { + break + } + + // Apply the defaulter chain to the outgoing event. + if resp != nil && len(r.eventDefaulterFns) > 0 { + for _, fn := range r.eventDefaulterFns { + *resp = fn(ctx, *resp) + } + // Validate the event conforms to the CloudEvents Spec. + if vErr := resp.Validate(); vErr != nil { + cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %v", vErr) + } + } + + // because binding.Message is an interface, casting a nil resp + // here would make future comparisons to nil false + if resp != nil { + respMsg = (*binding.EventMessage)(resp) + } + } + + if respFn == nil { + // let the protocol ACK based on the result + return result + } + + return respFn(ctx, respMsg, result) +} + +func (r *receiveInvoker) IsReceiver() bool { + return !r.fn.hasEventOut +} + +func (r *receiveInvoker) IsResponder() bool { + return r.fn.hasEventOut +} + +func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { + result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } + for _, f := range inboundContextDecorators { + result = f(result, message) + } + return result +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go new file mode 100644 index 000000000..75005d3bb --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// ObservabilityService is an interface users can implement to record metrics, create tracing spans, and plug other observability tools in the Client +type ObservabilityService interface { + // InboundContextDecorators is a method that returns the InboundContextDecorators that must be mounted in the Client to properly propagate some tracing informations. + InboundContextDecorators() []func(context.Context, binding.Message) context.Context + + // RecordReceivedMalformedEvent is invoked when an event was received but it's malformed or invalid. + RecordReceivedMalformedEvent(ctx context.Context, err error) + // RecordCallingInvoker is invoked before the user function is invoked. + // The returned callback will be invoked after the user finishes to process the event with the eventual processing error + // The error provided to the callback could be both a processing error, or a result + RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) + // RecordSendingEvent is invoked before the event is sent. + // The returned callback will be invoked when the response is received + // The error provided to the callback could be both a processing error, or a result + RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) + + // RecordRequestEvent is invoked before the event is requested. 
+ // The returned callback will be invoked when the response is received + RecordRequestEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error, event *event.Event)) +} + +type noopObservabilityService struct{} + +func (n noopObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { + return nil +} + +func (n noopObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) {} + +func (n noopObservabilityService) RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(errOrResult error, event *event.Event)) { + return ctx, func(errOrResult error, event *event.Event) {} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go new file mode 100644 index 000000000..938478162 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(interface{}) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. +func WithEventDefaulter(fn EventDefaulter) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + } + return nil + } +} + +func WithForceBinary() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceBinary) + } + return nil + } +} + +func WithForceStructured() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceStructured) + } + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + } + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + } + return nil + } +} + +// WithTracePropagation enables trace propagation via the distributed tracing +// extension. +// Deprecated: this is now noop and will be removed in future releases. 
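For illustration only (not part of the vendored SDK sources): a custom defaulter has the same shape as the ones in defaulters.go above and is attached through WithEventDefaulter; the fallback source value here is invented for the sketch.

// defaultSourceIfNotSet returns an EventDefaulter that stamps a fixed source
// on events that do not carry one yet (sketch, not upstream code).
func defaultSourceIfNotSet(source string) EventDefaulter {
	return func(ctx context.Context, e event.Event) event.Event {
		if e.Context != nil && e.Source() == "" {
			e.Context = e.Context.Clone()
			e.SetSource(source)
		}
		return e
	}
}
// It would be wired in with WithEventDefaulter when the client is constructed (see client.go).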
+// Don't use distributed tracing extension to propagate traces: +// https://github.com/cloudevents/spec/blob/v1.0.1/extensions/distributed-tracing.md#using-the-distributed-tracing-extension +func WithTracePropagation() Option { + return func(i interface{}) error { + return nil + } +} + +// WithPollGoroutines configures how much goroutines should be used to +// poll the Receiver/Responder/Protocol implementations. +// Default value is GOMAXPROCS +func WithPollGoroutines(pollGoroutines int) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.pollGoroutines = pollGoroutines + } + return nil + } +} + +// WithObservabilityService configures the observability service to use +// to record traces and metrics +func WithObservabilityService(service ObservabilityService) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.observabilityService = service + c.inboundContextDecorators = append(c.inboundContextDecorators, service.InboundContextDecorators()...) + } + return nil + } +} + +// WithInboundContextDecorator configures a new inbound context decorator. +// Inbound context decorators are invoked to wrap additional informations from the binding.Message +// and propagate these informations in the context passed to the event receiver. +func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.inboundContextDecorators = append(c.inboundContextDecorators, dec) + } + return nil + } +} + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. +// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go new file mode 100644 index 000000000..b1ab532d7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -0,0 +1,194 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// ReceiveFull is the signature of a fn to be invoked for incoming cloudevents. +type ReceiveFull func(context.Context, event.Event) protocol.Result + +type receiverFn struct { + numIn int + numOut int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + + hasEventOut bool + hasResultOut bool +} + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, event.Event) ordered" + outParamUsage = "expected a function returning one or mode of (*event.Event, protocol.Result) ordered" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*event.Event)(nil)).Elem() + eventPtrType = reflect.TypeOf((*event.Event)(nil)) // want the ptr type + resultType = reflect.TypeOf((*protocol.Result)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. 
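For illustration only (not upstream code): the options above compose when the client is built; the combination the WithBlockingCallback comment recommends for strictly serial processing looks roughly like this, assuming the New constructor defined in client.go of this package.

// Sketch: default missing IDs/timestamps and force one-at-a-time delivery.
opts := []Option{
	WithUUIDs(),            // default empty IDs to UUIDs
	WithTimeNow(),          // default zero times to time.Now()
	WithPollGoroutines(1),  // a single polling goroutine ...
	WithBlockingCallback(), // ... that waits for each callback to finish
}
// c, err := New(p, opts...) // p is a protocol implementation, e.g. *http.Protocol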
+// Valid fn signatures are: +// * func() +// * func() protocol.Result +// * func(context.Context) +// * func(context.Context) protocol.Result +// * func(event.Event) +// * func(event.Event) transport.Result +// * func(context.Context, event.Event) +// * func(context.Context, event.Event) protocol.Result +// * func(event.Event) *event.Event +// * func(event.Event) (*event.Event, protocol.Result) +// * func(context.Context, event.Event) *event.Event +// * func(context.Context, event.Event) (*event.Event, protocol.Result) +// +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + numOut: fnType.NumOut(), + } + + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, e *event.Event) (*event.Event, protocol.Result) { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(*e)) + } + } + v := r.fnValue.Call(args) + var respOut protocol.Result + var eOut *event.Event + if r.numOut > 0 { + i := 0 + if r.hasEventOut { + if eo, ok := v[i].Interface().(*event.Event); ok { + eOut = eo + } + i++ // <-- note, need to inc i. + } + if r.hasResultOut { + if resp, ok := v[i].Interface().(protocol.Result); ok { + respOut = resp + } + } + } + return eOut, respOut +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, event.Event in this order. +func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + + switch fnType.NumIn() { + case 2: + // has to be (context.Context, event.Event) + if !eventType.ConvertibleTo(fnType.In(1)) { + return fmt.Errorf("%s; cannot convert parameter 2 to %s from event.Event", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + fallthrough + case 1: + if !contextType.ConvertibleTo(fnType.In(0)) { + if !eventType.ConvertibleTo(fnType.In(0)) { + return fmt.Errorf("%s; cannot convert parameter 1 to %s from context.Context or event.Event", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the outputs of a function have a valid signature +// Valid output signatures to be [0, all] of +// *event.Event, transport.Result in this order +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasEventOut = false + r.hasResultOut = false + + switch fnType.NumOut() { + case 2: + // has to be (*event.Event, transport.Result) + if !fnType.Out(1).ConvertibleTo(resultType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Response", outParamUsage, fnType.Out(1)) + } else { + r.hasResultOut = true + } + fallthrough + case 1: + if !fnType.Out(0).ConvertibleTo(resultType) { + if !fnType.Out(0).ConvertibleTo(eventPtrType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to *event.Event or transport.Result", outParamUsage, fnType.Out(0)) 
+ } else { + r.hasEventOut = true + } + } else if r.hasResultOut { + return fmt.Errorf("%s; duplicate parameter of type event.Response", outParamUsage) + } else { + r.hasResultOut = true + } + fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go new file mode 100644 index 000000000..fc9ef0315 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -0,0 +1,110 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "net/url" + "time" +) + +// Opaque key type used to store target +type targetKeyType struct{} + +var targetKey = targetKeyType{} + +// WithTarget returns back a new context with the given target. Target is intended to be transport dependent. +// For http transport, `target` should be a full URL and will be injected into the outbound http request. +func WithTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, targetKey, target) +} + +// TargetFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. +func TargetFrom(ctx context.Context) *url.URL { + c := ctx.Value(targetKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + if target, err := url.Parse(s); err == nil { + return target + } + } + } + return nil +} + +// Opaque key type used to store topic +type topicKeyType struct{} + +var topicKey = topicKeyType{} + +// WithTopic returns back a new context with the given topic. Topic is intended to be transport dependent. +// For pubsub transport, `topic` should be a Pub/Sub Topic ID. +func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicKey, topic) +} + +// TopicFrom looks in the given context and returns `topic` as a string if found and valid, otherwise "". +func TopicFrom(ctx context.Context) string { + c := ctx.Value(topicKey) + if c != nil { + if s, ok := c.(string); ok { + return s + } + } + return "" +} + +// Opaque key type used to store retry parameters +type retriesKeyType struct{} + +var retriesKey = retriesKeyType{} + +// WithRetriesConstantBackoff returns back a new context with retries parameters using constant backoff strategy. +// MaxTries is the maximum number for retries and delay is the time interval between retries +func WithRetriesConstantBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyConstant, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesLinearBackoff returns back a new context with retries parameters using linear backoff strategy. 
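For illustration only (not upstream code), referring back to the signature list in receiver.go above: two callbacks that receiver() would accept; either could be passed to the client's StartReceiver defined in client.go. cecontext here stands for the v2/context package.

// logEvent only observes the event and ACKs it.
func logEvent(ctx context.Context, e event.Event) protocol.Result {
	cecontext.LoggerFrom(ctx).Infow("received event", "id", e.ID(), "type", e.Type())
	return protocol.ResultACK
}

// echoEvent responds with a copy of the incoming event.
func echoEvent(e event.Event) *event.Event {
	out := e.Clone()
	return &out
}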
+// MaxTries is the maximum number for retries and delay*tries is the time interval between retries +func WithRetriesLinearBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyLinear, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesExponentialBackoff returns back a new context with retries parameters using exponential backoff strategy. +// MaxTries is the maximum number for retries and period is the amount of time to wait, used as `period * 2^retries`. +func WithRetriesExponentialBackoff(ctx context.Context, period time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyExponential, + Period: period, + MaxTries: maxTries, + }) +} + +// WithRetryParams returns back a new context with retries parameters. +func WithRetryParams(ctx context.Context, rp *RetryParams) context.Context { + return context.WithValue(ctx, retriesKey, rp) +} + +// RetriesFrom looks in the given context and returns the retries parameters if found. +// Otherwise returns the default retries configuration (ie. no retries). +func RetriesFrom(ctx context.Context) *RetryParams { + c := ctx.Value(retriesKey) + if c != nil { + if s, ok := c.(*RetryParams); ok { + return s + } + } + return &DefaultRetryParams +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go new file mode 100644 index 000000000..434a4da7a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go @@ -0,0 +1,25 @@ +package context + +import "context" + +type valuesDelegating struct { + context.Context + parent context.Context +} + +// ValuesDelegating wraps a child and parent context. It will perform Value() +// lookups first on the child, and then fall back to the child. All other calls +// go solely to the child context. +func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go new file mode 100644 index 000000000..0b2dcaf70 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to +context.Context objects. +*/ +package context diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go new file mode 100644 index 000000000..b3087a79f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + + "go.uber.org/zap" +) + +// Opaque key type used to store logger +type loggerKeyType struct{} + +var loggerKey = loggerKeyType{} + +// fallbackLogger is the logger is used when there is no logger attached to the context. 
+var fallbackLogger *zap.SugaredLogger + +func init() { + if logger, err := zap.NewProduction(); err != nil { + // We failed to create a fallback logger. + fallbackLogger = zap.NewNop().Sugar() + } else { + fallbackLogger = logger.Named("fallback").Sugar() + } +} + +// WithLogger returns a new context with the logger injected into the given context. +func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context { + if logger == nil { + return context.WithValue(ctx, loggerKey, fallbackLogger) + } + return context.WithValue(ctx, loggerKey, logger) +} + +// LoggerFrom returns the logger stored in context. +func LoggerFrom(ctx context.Context) *zap.SugaredLogger { + l := ctx.Value(loggerKey) + if l != nil { + if logger, ok := l.(*zap.SugaredLogger); ok { + return logger + } + } + return fallbackLogger +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go new file mode 100644 index 000000000..ec17df72e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go @@ -0,0 +1,76 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "errors" + "math" + "time" +) + +type BackoffStrategy string + +const ( + BackoffStrategyNone = "none" + BackoffStrategyConstant = "constant" + BackoffStrategyLinear = "linear" + BackoffStrategyExponential = "exponential" +) + +var DefaultRetryParams = RetryParams{Strategy: BackoffStrategyNone} + +// RetryParams holds parameters applied to retries +type RetryParams struct { + // Strategy is the backoff strategy to applies between retries + Strategy BackoffStrategy + + // MaxTries is the maximum number of times to retry request before giving up + MaxTries int + + // Period is + // - for none strategy: no delay + // - for constant strategy: the delay interval between retries + // - for linear strategy: interval between retries = Period * retries + // - for exponential strategy: interval between retries = Period * retries^2 + Period time.Duration +} + +// BackoffFor tries will return the time duration that should be used for this +// current try count. +// `tries` is assumed to be the number of times the caller has already retried. +func (r *RetryParams) BackoffFor(tries int) time.Duration { + switch r.Strategy { + case BackoffStrategyConstant: + return r.Period + case BackoffStrategyLinear: + return r.Period * time.Duration(tries) + case BackoffStrategyExponential: + exp := math.Exp2(float64(tries)) + return r.Period * time.Duration(exp) + case BackoffStrategyNone: + fallthrough // default + default: + return r.Period + } +} + +// Backoff is a blocking call to wait for the correct amount of time for the retry. +// `tries` is assumed to be the number of times the caller has already retried. 
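For illustration only (not upstream code): attaching retry behaviour to an outbound call. RetriesFrom (context.go above) is what protocol implementations consult, and BackoffFor above determines the wait per attempt (Period * 2^tries for the exponential strategy).

// Sketch: allow up to 5 retries with exponential backoff starting from 100ms.
ctx := cecontext.WithRetriesExponentialBackoff(context.Background(), 100*time.Millisecond, 5)
// result := c.Send(ctx, e) // c is a client.Client built elsewhere; Send is declared on the Client interface in client.go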
+func (r *RetryParams) Backoff(ctx context.Context, tries int) error { + if tries > r.MaxTries { + return errors.New("too many retries") + } + ticker := time.NewTicker(r.BackoffFor(tries)) + select { + case <-ctx.Done(): + ticker.Stop() + return errors.New("context has been cancelled") + case <-ticker.C: + ticker.Stop() + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go new file mode 100644 index 000000000..a49522f82 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -0,0 +1,47 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + TextPlain = "text/plain" + TextJSON = "text/json" + ApplicationJSON = "application/json" + ApplicationXML = "application/xml" + ApplicationCloudEventsJSON = "application/cloudevents+json" + ApplicationCloudEventsBatchJSON = "application/cloudevents-batch+json" +) + +// StringOfApplicationJSON returns a string pointer to "application/json" +func StringOfApplicationJSON() *string { + a := ApplicationJSON + return &a +} + +// StringOfApplicationXML returns a string pointer to "application/xml" +func StringOfApplicationXML() *string { + a := ApplicationXML + return &a +} + +// StringOfTextPlain returns a string pointer to "text/plain" +func StringOfTextPlain() *string { + a := TextPlain + return &a +} + +// StringOfApplicationCloudEventsJSON returns a string pointer to +// "application/cloudevents+json" +func StringOfApplicationCloudEventsJSON() *string { + a := ApplicationCloudEventsJSON + return &a +} + +// StringOfApplicationCloudEventsBatchJSON returns a string pointer to +// "application/cloudevents-batch+json" +func StringOfApplicationCloudEventsBatchJSON() *string { + a := ApplicationCloudEventsBatchJSON + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go new file mode 100644 index 000000000..cf2152693 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -0,0 +1,16 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + Base64 = "base64" +) + +// StringOfBase64 returns a string pointer to "Base64" +func StringOfBase64() *string { + a := Base64 + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go new file mode 100644 index 000000000..3e077740b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -0,0 +1,78 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package datacodec + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" +) + +// Decoder is the expected function signature for decoding `in` to `out`. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +type Decoder func(ctx context.Context, in []byte, out interface{}) error + +// Encoder is the expected function signature for encoding `in` to bytes. +// Returns an error if the encoder has an issue encoding `in`. 
+type Encoder func(ctx context.Context, in interface{}) ([]byte, error) + +var decoder map[string]Decoder +var encoder map[string]Encoder + +func init() { + decoder = make(map[string]Decoder, 10) + encoder = make(map[string]Encoder, 10) + + AddDecoder("", json.Decode) + AddDecoder("application/json", json.Decode) + AddDecoder("text/json", json.Decode) + AddDecoder("application/xml", xml.Decode) + AddDecoder("text/xml", xml.Decode) + AddDecoder("text/plain", text.Decode) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.Encode) + AddEncoder("text/json", json.Encode) + AddEncoder("application/xml", xml.Encode) + AddEncoder("text/xml", xml.Encode) + AddEncoder("text/plain", text.Encode) +} + +// AddDecoder registers a decoder for a given content type. The codecs will use +// these to decode the data payload from a cloudevent.Event object. +func AddDecoder(contentType string, fn Decoder) { + decoder[contentType] = fn +} + +// AddEncoder registers an encoder for a given content type. The codecs will +// use these to encode the data payload for a cloudevent.Event object. +func AddEncoder(contentType string, fn Encoder) { + encoder[contentType] = fn +} + +// Decode looks up and invokes the decoder registered for the given content +// type. An error is returned if no decoder is registered for the given +// content type. +func Decode(ctx context.Context, contentType string, in []byte, out interface{}) error { + if fn, ok := decoder[contentType]; ok { + return fn(ctx, in, out) + } + return fmt.Errorf("[decode] unsupported content type: %q", contentType) +} + +// Encode looks up and invokes the encoder registered for the given content +// type. An error is returned if no encoder is registered for the given +// content type. +func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + if fn, ok := encoder[contentType]; ok { + return fn(ctx, in) + } + return nil, fmt.Errorf("[encode] unsupported content type: %q", contentType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go new file mode 100644 index 000000000..b681af887 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as +`application/json` and `application/xml`. +*/ +package datacodec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go new file mode 100644 index 000000000..734ade59f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -0,0 +1,56 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. 
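For illustration only (not upstream code): the registry above is what Event.SetData and Event.DataAs go through, and it can also be called directly.

// Sketch: encode a value as application/json and decode it back through the registry.
payload, err := datacodec.Encode(context.TODO(), "application/json", map[string]string{"msg": "hi"})
if err != nil {
	// handle error
}
var out map[string]string
err = datacodec.Decode(context.TODO(), "application/json", payload, &out)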
+func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + if err := json.Unmarshal(in, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(in), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. + if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go new file mode 100644 index 000000000..33e1323c7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package json holds the encoder/decoder implementation for `application/json`. +*/ +package json diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go new file mode 100644 index 000000000..761a10113 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -0,0 +1,30 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package text + +import ( + "context" + "fmt" +) + +// Text codec converts []byte or string to string and vice-versa. + +func Decode(_ context.Context, in []byte, out interface{}) error { + p, _ := out.(*string) + if p == nil { + return fmt.Errorf("text.Decode out: want *string, got %T", out) + } + *p = string(in) + return nil +} + +func Encode(_ context.Context, in interface{}) ([]byte, error) { + s, ok := in.(string) + if !ok { + return nil, fmt.Errorf("text.Encode in: want string, got %T", in) + } + return []byte(s), nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go new file mode 100644 index 000000000..af10577aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package text holds the encoder/decoder implementation for `text/plain`. +*/ +package text diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go new file mode 100644 index 000000000..de68ec3dc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -0,0 +1,40 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package xml + +import ( + "context" + "encoding/xml" + "fmt" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. 
+func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + + if err := xml.Unmarshal(in, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(in)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if b, ok := in.([]byte); ok { + // check to see if it is a pre-encoded byte string. + if len(b) > 0 && b[0] == byte('"') { + return b, nil + } + } + + return xml.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go new file mode 100644 index 000000000..c8d73213f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package xml holds the encoder/decoder implementation for `application/xml`. +*/ +package xml diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go new file mode 100644 index 000000000..31c22ce67 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. +*/ +package event diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go new file mode 100644 index 000000000..94b5aa0ad --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/json" + "strings" +) + +// Event represents the canonical representation of a CloudEvent. +type Event struct { + Context EventContext + DataEncoded []byte + // DataBase64 indicates if the event, when serialized, represents + // the data field using the base64 encoding. + // In v0.3, this field is superseded by DataContentEncoding + DataBase64 bool + FieldErrors map[string]error +} + +const ( + defaultEventVersion = CloudEventsVersionV1 +) + +func (e *Event) fieldError(field string, err error) { + if e.FieldErrors == nil { + e.FieldErrors = make(map[string]error) + } + e.FieldErrors[field] = err +} + +func (e *Event) fieldOK(field string) { + if e.FieldErrors != nil { + delete(e.FieldErrors, field) + } +} + +// New returns a new Event, an optional version can be passed to change the +// default spec version from 1.0 to the provided version. +func New(version ...string) Event { + specVersion := defaultEventVersion + if len(version) >= 1 { + specVersion = version[0] + } + e := &Event{} + e.SetSpecVersion(specVersion) + return *e +} + +// ExtensionAs is deprecated: access extensions directly via the e.Extensions() map. +// Use functions in the types package to convert extension values. 
+// For example replace this: +// +// var i int +// err := e.ExtensionAs("foo", &i) +// +// With this: +// +// i, err := types.ToInteger(e.Extensions["foo"]) +// +func (e Event) ExtensionAs(name string, obj interface{}) error { + return e.Context.ExtensionAs(name, obj) +} + +// String returns a pretty-printed representation of the Event. +func (e Event) String() string { + b := strings.Builder{} + + b.WriteString(e.Context.String()) + + if e.DataEncoded != nil { + if e.DataBase64 { + b.WriteString("Data (binary),\n ") + } else { + b.WriteString("Data,\n ") + } + switch e.DataMediaType() { + case ApplicationJSON: + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, e.DataEncoded, " ", " ") + if err != nil { + b.Write(e.DataEncoded) + } else { + b.Write(prettyJSON.Bytes()) + } + default: + b.Write(e.DataEncoded) + } + b.WriteString("\n") + } + + return b.String() +} + +func (e Event) Clone() Event { + out := Event{} + out.Context = e.Context.Clone() + out.DataEncoded = cloneBytes(e.DataEncoded) + out.DataBase64 = e.DataBase64 + out.FieldErrors = e.cloneFieldErrors() + return out +} + +func cloneBytes(in []byte) []byte { + if in == nil { + return nil + } + out := make([]byte, len(in)) + copy(out, in) + return out +} + +func (e Event) cloneFieldErrors() map[string]error { + if e.FieldErrors == nil { + return nil + } + newFE := make(map[string]error, len(e.FieldErrors)) + for k, v := range e.FieldErrors { + newFE[k] = v + } + return newFE +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go new file mode 100644 index 000000000..8fc449ed9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -0,0 +1,118 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/v2/event/datacodec" +) + +// SetData encodes the given payload with the given content type. +// If the provided payload is a byte array, when marshalled to json it will be encoded as base64. +// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a +// marshalling to byte array. +func (e *Event) SetData(contentType string, obj interface{}) error { + e.SetDataContentType(contentType) + + if e.SpecVersion() != CloudEventsVersionV1 { + return e.legacySetData(obj) + } + + // Version 1.0 and above. + switch obj := obj.(type) { + case []byte: + e.DataEncoded = obj + e.DataBase64 = true + default: + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + + return nil +} + +// Deprecated: Delete when we do not have to support Spec v0.3. 
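For illustration only (not upstream code): a caller's view of the API above, building a v1 event, attaching JSON data with SetData, and reading it back with DataAs (defined just below). The identifiers are invented for the example.

e := event.New() // spec version defaults to 1.0
e.SetID("example-1")
e.SetType("com.example.sample.sent")
e.SetSource("example/uri")
if err := e.SetData(event.ApplicationJSON, map[string]string{"msg": "hi"}); err != nil {
	// handle error
}
var out map[string]string
if err := e.DataAs(&out); err != nil {
	// handle error
}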
+func (e *Event) legacySetData(obj interface{}) error { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + if e.DeprecatedDataContentEncoding() == Base64 { + buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + e.DataEncoded = buf + e.DataBase64 = false + } else { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + return nil +} + +const ( + quotes = `"'` +) + +func (e Event) Data() []byte { + return e.DataEncoded +} + +// DataAs attempts to populate the provided data object with the event payload. +// obj should be a pointer type. +func (e Event) DataAs(obj interface{}) error { + data := e.Data() + + if len(data) == 0 { + // No data. + return nil + } + + if e.SpecVersion() != CloudEventsVersionV1 { + var err error + if data, err = e.legacyConvertData(data); err != nil { + return err + } + } + + return datacodec.Decode(context.Background(), e.DataMediaType(), data, obj) +} + +func (e Event) legacyConvertData(data []byte) ([]byte, error) { + if e.Context.DeprecatedGetDataContentEncoding() == Base64 { + var bs []byte + // test to see if we need to unquote the data. + if data[0] == quotes[0] || data[0] == quotes[1] { + str, err := strconv.Unquote(string(data)) + if err != nil { + return nil, err + } + bs = []byte(str) + } else { + bs = data + } + + buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) + n, err := base64.StdEncoding.Decode(buf, bs) + if err != nil { + return nil, fmt.Errorf("failed to decode data from base64: %s", err.Error()) + } + data = buf[:n] + } + + return data, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go new file mode 100644 index 000000000..2809fed57 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -0,0 +1,102 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +// EventReader is the interface for reading through an event from attributes. +type EventReader interface { + // SpecVersion returns event.Context.GetSpecVersion(). + SpecVersion() string + // Type returns event.Context.GetType(). + Type() string + // Source returns event.Context.GetSource(). + Source() string + // Subject returns event.Context.GetSubject(). + Subject() string + // ID returns event.Context.GetID(). + ID() string + // Time returns event.Context.GetTime(). + Time() time.Time + // DataSchema returns event.Context.GetDataSchema(). + DataSchema() string + // DataContentType returns event.Context.GetDataContentType(). + DataContentType() string + // DataMediaType returns event.Context.GetDataMediaType(). + DataMediaType() string + // DeprecatedDataContentEncoding returns event.Context.DeprecatedGetDataContentEncoding(). + DeprecatedDataContentEncoding() string + + // Extension Attributes + + // Extensions returns the event.Context.GetExtensions(). + // Extensions use the CloudEvents type system, details in package cloudevents/types. + Extensions() map[string]interface{} + + // ExtensionAs returns event.Context.ExtensionAs(name, obj). + // + // DEPRECATED: Access extensions directly via the e.Extensions() map. + // Use functions in the types package to convert extension values. 
+ // For example replace this: + // + // var i int + // err := e.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(e.Extensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // Data Attribute + + // Data returns the raw data buffer + // If the event was encoded with base64 encoding, Data returns the already decoded + // byte array + Data() []byte + + // DataAs attempts to populate the provided data object with the event payload. + DataAs(interface{}) error +} + +// EventWriter is the interface for writing through an event onto attributes. +// If an error is thrown by a sub-component, EventWriter caches the error +// internally and exposes errors with a call to event.Validate(). +type EventWriter interface { + // Context Attributes + + // SetSpecVersion performs event.Context.SetSpecVersion. + SetSpecVersion(string) + // SetType performs event.Context.SetType. + SetType(string) + // SetSource performs event.Context.SetSource. + SetSource(string) + // SetSubject( performs event.Context.SetSubject. + SetSubject(string) + // SetID performs event.Context.SetID. + SetID(string) + // SetTime performs event.Context.SetTime. + SetTime(time.Time) + // SetDataSchema performs event.Context.SetDataSchema. + SetDataSchema(string) + // SetDataContentType performs event.Context.SetDataContentType. + SetDataContentType(string) + // DeprecatedSetDataContentEncoding performs event.Context.DeprecatedSetDataContentEncoding. + SetDataContentEncoding(string) + + // Extension Attributes + + // SetExtension performs event.Context.SetExtension. + SetExtension(string, interface{}) + + // SetData encodes the given payload with the given content type. + // If the provided payload is a byte array, when marshalled to json it will be encoded as base64. + // If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a + // marshalling to byte array. + SetData(string, interface{}) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go new file mode 100644 index 000000000..c5f2dc03c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -0,0 +1,203 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +// WriteJson writes the in event in the provided writer. +// Note: this function assumes the input event is valid. 
+func WriteJson(in *Event, writer io.Writer) error { + stream := jsoniter.ConfigFastest.BorrowStream(writer) + defer jsoniter.ConfigFastest.ReturnStream(stream) + stream.WriteObjectStart() + + var ext map[string]interface{} + var dct *string + var isBase64 bool + + // Write the context (without the extensions) + switch eventContext := in.Context.(type) { + case *EventContextV03: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV03) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentEncoding != nil { + isBase64 = true + stream.WriteMore() + stream.WriteObjectField("datacontentencoding") + stream.WriteString(*eventContext.DataContentEncoding) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.SchemaURL != nil { + stream.WriteMore() + stream.WriteObjectField("schemaurl") + stream.WriteString(eventContext.SchemaURL.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + case *EventContextV1: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + isBase64 = in.DataBase64 + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV1) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.DataSchema != nil { + stream.WriteMore() + stream.WriteObjectField("dataschema") + stream.WriteString(eventContext.DataSchema.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + default: + return fmt.Errorf("missing event context") + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event attributes: %w", stream.Error) + } + + // Let's write the body + if in.DataEncoded != nil { + stream.WriteMore() + + // We need to figure out the media type first + var mediaType string + if dct == nil { + mediaType = ApplicationJSON + } else { + // This code is required to extract the media type from the full content type string (which might contain encoding and stuff) + contentType := *dct + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + mediaType = 
strings.TrimSpace(strings.ToLower(contentType[0:i])) + } + + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON + + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !isBase64 { + stream.WriteObjectField("data") + _, err := stream.Write(in.DataEncoded) + if err != nil { + return fmt.Errorf("error while writing data: %w", err) + } + } else { + if in.Context.GetSpecVersion() == CloudEventsVersionV1 && isBase64 { + stream.WriteObjectField("data_base64") + } else { + stream.WriteObjectField("data") + } + // At this point of we need to write to base 64 string, or we just need to write the plain string + if isBase64 { + stream.WriteString(base64.StdEncoding.EncodeToString(in.DataEncoded)) + } else { + stream.WriteString(string(in.DataEncoded)) + } + } + + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event data: %w", stream.Error) + } + + for k, v := range ext { + stream.WriteMore() + stream.WriteObjectField(k) + stream.WriteVal(v) + } + + stream.WriteObjectEnd() + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event extensions: %w", stream.Error) + } + return stream.Flush() +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (e Event) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := WriteJson(&e, &buf) + return buf.Bytes(), err +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go new file mode 100644 index 000000000..9d1aeeb65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +var _ EventReader = (*Event)(nil) + +// SpecVersion implements EventReader.SpecVersion +func (e Event) SpecVersion() string { + if e.Context != nil { + return e.Context.GetSpecVersion() + } + return "" +} + +// Type implements EventReader.Type +func (e Event) Type() string { + if e.Context != nil { + return e.Context.GetType() + } + return "" +} + +// Source implements EventReader.Source +func (e Event) Source() string { + if e.Context != nil { + return e.Context.GetSource() + } + return "" +} + +// Subject implements EventReader.Subject +func (e Event) Subject() string { + if e.Context != nil { + return e.Context.GetSubject() + } + return "" +} + +// ID implements EventReader.ID +func (e Event) ID() string { + if e.Context != nil { + return e.Context.GetID() + } + return "" +} + +// Time implements EventReader.Time +func (e Event) Time() time.Time { + if e.Context != nil { + return e.Context.GetTime() + } + return time.Time{} +} + +// DataSchema implements EventReader.DataSchema +func (e Event) DataSchema() string { + if e.Context != nil { + return e.Context.GetDataSchema() + } + return "" +} + +// DataContentType implements EventReader.DataContentType +func (e Event) DataContentType() string { + if e.Context != nil { + return e.Context.GetDataContentType() + } + return "" +} + +// DataMediaType returns the parsed DataMediaType of the event. If parsing +// fails, the empty string is returned. To retrieve the parsing error, use +// `Context.GetDataMediaType` instead. 
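For illustration only (not upstream code): because Event implements json.Marshaler through MarshalJSON/WriteJson above, the standard library can serialize events directly, and ReadJson (event_unmarshal.go below) performs the reverse. Imports of encoding/json and bytes are assumed.

// Sketch: round-tripping an event through its JSON representation.
data, err := json.Marshal(e) // e is an event.Event; this delegates to WriteJson
if err != nil {
	// handle error
}
var decoded event.Event
err = event.ReadJson(&decoded, bytes.NewReader(data))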
+func (e Event) DataMediaType() string { + if e.Context != nil { + mediaType, _ := e.Context.GetDataMediaType() + return mediaType + } + return "" +} + +// DeprecatedDataContentEncoding implements EventReader.DeprecatedDataContentEncoding +func (e Event) DeprecatedDataContentEncoding() string { + if e.Context != nil { + return e.Context.DeprecatedGetDataContentEncoding() + } + return "" +} + +// Extensions implements EventReader.Extensions +func (e Event) Extensions() map[string]interface{} { + if e.Context != nil { + return e.Context.GetExtensions() + } + return map[string]interface{}(nil) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go new file mode 100644 index 000000000..0dd88ae5a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go @@ -0,0 +1,480 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + + jsoniter "github.com/json-iterator/go" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const specVersionV03Flag uint8 = 1 << 4 +const specVersionV1Flag uint8 = 1 << 5 +const dataBase64Flag uint8 = 1 << 6 +const dataContentTypeFlag uint8 = 1 << 7 + +func checkFlag(state uint8, flag uint8) bool { + return state&flag != 0 +} + +func appendFlag(state *uint8, flag uint8) { + *state = (*state) | flag +} + +var iterPool = sync.Pool{ + New: func() interface{} { + return jsoniter.Parse(jsoniter.ConfigFastest, nil, 1024) + }, +} + +func borrowIterator(reader io.Reader) *jsoniter.Iterator { + iter := iterPool.Get().(*jsoniter.Iterator) + iter.Reset(reader) + return iter +} + +func returnIterator(iter *jsoniter.Iterator) { + iter.Error = nil + iter.Attachment = nil + iterPool.Put(iter) +} + +func ReadJson(out *Event, reader io.Reader) error { + iterator := borrowIterator(reader) + defer returnIterator(iterator) + + return readJsonFromIterator(out, iterator) +} + +// ReadJson allows you to read the bytes reader as an event +func readJsonFromIterator(out *Event, iterator *jsoniter.Iterator) error { + // Parsing dependency graph: + // SpecVersion + // ^ ^ + // | +--------------+ + // + + + // All Attributes datacontenttype (and datacontentencoding for v0.3) + // (except datacontenttype) ^ + // | + // | + // + + // Data + + var state uint8 = 0 + var cachedData []byte + + var ( + // Universally parseable fields. + id string + typ string + source types.URIRef + subject *string + time *types.Timestamp + datacontenttype *string + extensions = make(map[string]interface{}) + + // These fields require knowledge about the specversion to be parsed. 
+ schemaurl jsoniter.Any + datacontentencoding jsoniter.Any + dataschema jsoniter.Any + dataBase64 jsoniter.Any + ) + + for key := iterator.ReadObject(); key != ""; key = iterator.ReadObject() { + // Check if we have some error in our error cache + if iterator.Error != nil { + return iterator.Error + } + + // We have a key, now we need to figure out what to do + // depending on the parsing state + + // If it's a specversion, trigger state change + if key == "specversion" { + if checkFlag(state, specVersionV1Flag|specVersionV03Flag) { + return fmt.Errorf("specversion was already provided") + } + sv := iterator.ReadString() + + // Check proper specversion + switch sv { + case CloudEventsVersionV1: + con := &EventContextV1{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + + // Add the fields relevant for the version ... + if dataschema != nil { + var err error + con.DataSchema, err = toUriPtr(dataschema) + if err != nil { + return err + } + } + if dataBase64 != nil { + stream := jsoniter.ConfigFastest.BorrowStream(nil) + defer jsoniter.ConfigFastest.ReturnStream(stream) + dataBase64.WriteTo(stream) + cachedData = stream.Buffer() + if stream.Error != nil { + return stream.Error + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if schemaurl != nil { + extensions["schemaurl"] = schemaurl.GetInterface() + } + if datacontentencoding != nil { + extensions["datacontentencoding"] = datacontentencoding.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV1Flag) + case CloudEventsVersionV03: + con := &EventContextV03{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + var err error + // Add the fields relevant for the version ... + if schemaurl != nil { + con.SchemaURL, err = toUriRefPtr(schemaurl) + if err != nil { + return err + } + } + if datacontentencoding != nil { + con.DataContentEncoding, err = toStrPtr(datacontentencoding) + if *con.DataContentEncoding != Base64 { + err = ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + if err != nil { + return err + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if dataschema != nil { + extensions["dataschema"] = dataschema.GetInterface() + } + if dataBase64 != nil { + extensions["data_base64"] = dataBase64.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV03Flag) + default: + return ValidationError{"specversion": errors.New("unknown value: " + sv)} + } + + // Apply all extensions to the context object. + for key, val := range extensions { + if err := out.Context.SetExtension(key, val); err != nil { + return err + } + } + continue + } + + // If no specversion ... 
+ if !checkFlag(state, specVersionV03Flag|specVersionV1Flag) { + switch key { + case "id": + id = iterator.ReadString() + case "type": + typ = iterator.ReadString() + case "source": + source = readUriRef(iterator) + case "subject": + subject = readStrPtr(iterator) + case "time": + time = readTimestamp(iterator) + case "datacontenttype": + datacontenttype = readStrPtr(iterator) + appendFlag(&state, dataContentTypeFlag) + case "data": + cachedData = iterator.SkipAndReturnBytes() + case "data_base64": + dataBase64 = iterator.ReadAny() + case "dataschema": + dataschema = iterator.ReadAny() + case "schemaurl": + schemaurl = iterator.ReadAny() + case "datacontentencoding": + datacontentencoding = iterator.ReadAny() + default: + extensions[key] = iterator.Read() + } + continue + } + + // From this point downward -> we can assume the event has a context pointer non nil + + // If it's a datacontenttype, trigger state change + if key == "datacontenttype" { + if checkFlag(state, dataContentTypeFlag) { + return fmt.Errorf("datacontenttype was already provided") + } + + dct := iterator.ReadString() + + switch ctx := out.Context.(type) { + case *EventContextV03: + ctx.DataContentType = &dct + case *EventContextV1: + ctx.DataContentType = &dct + } + appendFlag(&state, dataContentTypeFlag) + continue + } + + // If it's a datacontentencoding and it's v0.3, trigger state change + if checkFlag(state, specVersionV03Flag) && key == "datacontentencoding" { + if checkFlag(state, dataBase64Flag) { + return ValidationError{"datacontentencoding": errors.New("datacontentencoding was specified twice")} + } + + dce := iterator.ReadString() + + if dce != Base64 { + return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + + out.Context.(*EventContextV03).DataContentEncoding = &dce + appendFlag(&state, dataBase64Flag) + continue + } + + // We can parse all attributes, except data. 
+ // If it's data or data_base64 and we don't have the attributes to process it, then we cache it + // The expanded form of this condition is: + // (checkFlag(state, specVersionV1Flag) && !checkFlag(state, dataContentTypeFlag) && (key == "data" || key == "data_base64")) || + // (checkFlag(state, specVersionV03Flag) && !(checkFlag(state, dataContentTypeFlag) && checkFlag(state, dataBase64Flag)) && key == "data") + if (state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag && (key == "data" || key == "data_base64")) || + ((state&specVersionV03Flag == specVersionV03Flag) && (state&(dataContentTypeFlag|dataBase64Flag) != (dataContentTypeFlag | dataBase64Flag)) && key == "data") { + if key == "data_base64" { + appendFlag(&state, dataBase64Flag) + } + cachedData = iterator.SkipAndReturnBytes() + continue + } + + // At this point or this value is an attribute (excluding datacontenttype and datacontentencoding), or this value is data and this condition is valid: + // (specVersionV1Flag & dataContentTypeFlag) || (specVersionV03Flag & dataContentTypeFlag & dataBase64Flag) + switch eventContext := out.Context.(type) { + case *EventContextV03: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "schemaurl": + eventContext.SchemaURL = readUriRefPtr(iterator) + case "data": + iterator.Error = consumeData(out, checkFlag(state, dataBase64Flag), iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + case *EventContextV1: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "dataschema": + eventContext.DataSchema = readUriPtr(iterator) + case "data": + iterator.Error = consumeData(out, false, iterator) + case "data_base64": + iterator.Error = consumeData(out, true, iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + } + } + + if state&(specVersionV03Flag|specVersionV1Flag) == 0 { + return ValidationError{"specversion": errors.New("no specversion")} + } + + if iterator.Error != nil { + return iterator.Error + } + + // If there is a dataToken cached, we always defer at the end the processing + // because nor datacontenttype or datacontentencoding are mandatory. 
+ if cachedData != nil { + return consumeDataAsBytes(out, checkFlag(state, dataBase64Flag), cachedData) + } + return nil +} + +func consumeDataAsBytes(e *Event, isBase64 bool, b []byte) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := b[1 : len(b)-1] // remove quotes + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + // Empty content type assumes json + if mt != "" && mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, b) + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = b + return nil +} + +func consumeData(e *Event, isBase64 bool, iter *jsoniter.Iterator) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := iter.ReadStringAsSlice() + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + if mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = iter.SkipAndReturnBytes() + return nil +} + +func readUriRef(iter *jsoniter.Iterator) types.URIRef { + str := iter.ReadString() + uriRef := types.ParseURIRef(str) + if uriRef == nil { + iter.Error = fmt.Errorf("cannot parse uri ref: %v", str) + return types.URIRef{} + } + return *uriRef +} + +func readStrPtr(iter *jsoniter.Iterator) *string { + str := iter.ReadString() + if str == "" { + return nil + } + return &str +} + +func readUriRefPtr(iter *jsoniter.Iterator) *types.URIRef { + return types.ParseURIRef(iter.ReadString()) +} + +func readUriPtr(iter *jsoniter.Iterator) *types.URI { + return types.ParseURI(iter.ReadString()) +} + +func readTimestamp(iter *jsoniter.Iterator) *types.Timestamp { + t, err := types.ParseTimestamp(iter.ReadString()) + if err != nil { + iter.Error = err + } + return t +} + +func toStrPtr(val jsoniter.Any) (*string, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + if str == "" { + return nil, nil + } + return &str, nil +} + +func toUriRefPtr(val jsoniter.Any) (*types.URIRef, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURIRef(str), nil +} + +func toUriPtr(val jsoniter.Any) (*types.URI, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURI(str), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. 
+func (e *Event) UnmarshalJSON(b []byte) error { + iterator := jsoniter.ConfigFastest.BorrowIterator(b) + defer jsoniter.ConfigFastest.ReturnIterator(iterator) + return readJsonFromIterator(e, iterator) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go new file mode 100644 index 000000000..958ecc47d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" +) + +type ValidationError map[string]error + +func (e ValidationError) Error() string { + b := strings.Builder{} + for k, v := range e { + b.WriteString(k) + b.WriteString(": ") + b.WriteString(v.Error()) + b.WriteRune('\n') + } + return b.String() +} + +// Validate performs a spec based validation on this event. +// Validation is dependent on the spec version specified in the event context. +func (e Event) Validate() error { + if e.Context == nil { + return ValidationError{"specversion": fmt.Errorf("missing Event.Context")} + } + + errs := map[string]error{} + if e.FieldErrors != nil { + for k, v := range e.FieldErrors { + errs[k] = v + } + } + + if fieldErrors := e.Context.Validate(); fieldErrors != nil { + for k, v := range fieldErrors { + errs[k] = v + } + } + + if len(errs) > 0 { + return ValidationError(errs) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go new file mode 100644 index 000000000..ddfb1be38 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go @@ -0,0 +1,117 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "time" +) + +var _ EventWriter = (*Event)(nil) + +// SetSpecVersion implements EventWriter.SetSpecVersion +func (e *Event) SetSpecVersion(v string) { + switch v { + case CloudEventsVersionV03: + if e.Context == nil { + e.Context = &EventContextV03{} + } else { + e.Context = e.Context.AsV03() + } + case CloudEventsVersionV1: + if e.Context == nil { + e.Context = &EventContextV1{} + } else { + e.Context = e.Context.AsV1() + } + default: + e.fieldError("specversion", fmt.Errorf("a valid spec version is required: [%s, %s]", + CloudEventsVersionV03, CloudEventsVersionV1)) + return + } + e.fieldOK("specversion") +} + +// SetType implements EventWriter.SetType +func (e *Event) SetType(t string) { + if err := e.Context.SetType(t); err != nil { + e.fieldError("type", err) + } else { + e.fieldOK("type") + } +} + +// SetSource implements EventWriter.SetSource +func (e *Event) SetSource(s string) { + if err := e.Context.SetSource(s); err != nil { + e.fieldError("source", err) + } else { + e.fieldOK("source") + } +} + +// SetSubject implements EventWriter.SetSubject +func (e *Event) SetSubject(s string) { + if err := e.Context.SetSubject(s); err != nil { + e.fieldError("subject", err) + } else { + e.fieldOK("subject") + } +} + +// SetID implements EventWriter.SetID +func (e *Event) SetID(id string) { + if err := e.Context.SetID(id); err != nil { + e.fieldError("id", err) + } else { + e.fieldOK("id") + } +} + +// SetTime implements EventWriter.SetTime +func (e *Event) SetTime(t time.Time) { + if err := e.Context.SetTime(t); err != nil { + e.fieldError("time", err) + } else { + e.fieldOK("time") + } +} + +// SetDataSchema implements 
EventWriter.SetDataSchema +func (e *Event) SetDataSchema(s string) { + if err := e.Context.SetDataSchema(s); err != nil { + e.fieldError("dataschema", err) + } else { + e.fieldOK("dataschema") + } +} + +// SetDataContentType implements EventWriter.SetDataContentType +func (e *Event) SetDataContentType(ct string) { + if err := e.Context.SetDataContentType(ct); err != nil { + e.fieldError("datacontenttype", err) + } else { + e.fieldOK("datacontenttype") + } +} + +// SetDataContentEncoding is deprecated. Implements EventWriter.SetDataContentEncoding. +func (e *Event) SetDataContentEncoding(enc string) { + if err := e.Context.DeprecatedSetDataContentEncoding(enc); err != nil { + e.fieldError("datacontentencoding", err) + } else { + e.fieldOK("datacontentencoding") + } +} + +// SetExtension implements EventWriter.SetExtension +func (e *Event) SetExtension(name string, obj interface{}) { + if err := e.Context.SetExtension(name, obj); err != nil { + e.fieldError("extension:"+name, err) + } else { + e.fieldOK("extension:" + name) + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go new file mode 100644 index 000000000..a39565afa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go @@ -0,0 +1,125 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import "time" + +// EventContextReader are the methods required to be a reader of context +// attributes. +type EventContextReader interface { + // GetSpecVersion returns the native CloudEvents Spec version of the event + // context. + GetSpecVersion() string + // GetType returns the CloudEvents type from the context. + GetType() string + // GetSource returns the CloudEvents source from the context. + GetSource() string + // GetSubject returns the CloudEvents subject from the context. + GetSubject() string + // GetID returns the CloudEvents ID from the context. + GetID() string + // GetTime returns the CloudEvents creation time from the context. + GetTime() time.Time + // GetDataSchema returns the CloudEvents schema URL (if any) from the + // context. + GetDataSchema() string + // GetDataContentType returns content type on the context. + GetDataContentType() string + // DeprecatedGetDataContentEncoding returns content encoding on the context. + DeprecatedGetDataContentEncoding() string + + // GetDataMediaType returns the MIME media type for encoded data, which is + // needed by both encoding and decoding. This is a processed form of + // GetDataContentType and it may return an error. + GetDataMediaType() (string, error) + + // DEPRECATED: Access extensions directly via the GetExtensions() + // For example replace this: + // + // var i int + // err := ec.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(ec.GetExtensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // GetExtensions returns the full extensions map. + // + // Extensions use the CloudEvents type system, details in package cloudevents/types. + GetExtensions() map[string]interface{} + + // GetExtension returns the extension associated with with the given key. + // The given key is case insensitive. If the extension can not be found, + // an error will be returned. + GetExtension(string) (interface{}, error) +} + +// EventContextWriter are the methods required to be a writer of context +// attributes. 
+type EventContextWriter interface { + // SetType sets the type of the context. + SetType(string) error + // SetSource sets the source of the context. + SetSource(string) error + // SetSubject sets the subject of the context. + SetSubject(string) error + // SetID sets the ID of the context. + SetID(string) error + // SetTime sets the time of the context. + SetTime(time time.Time) error + // SetDataSchema sets the schema url of the context. + SetDataSchema(string) error + // SetDataContentType sets the data content type of the context. + SetDataContentType(string) error + // DeprecatedSetDataContentEncoding sets the data context encoding of the context. + DeprecatedSetDataContentEncoding(string) error + + // SetExtension sets the given interface onto the extension attributes + // determined by the provided name. + // + // This function fails in V1 if the name doesn't respect the regex ^[a-zA-Z0-9]+$ + // + // Package ./types documents the types that are allowed as extension values. + SetExtension(string, interface{}) error +} + +// EventContextConverter are the methods that allow for event version +// conversion. +type EventContextConverter interface { + // AsV03 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v0.3 field names, moving fields to or + // from extensions as necessary. + AsV03() *EventContextV03 + + // AsV1 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v1.0 field names, moving fields to or + // from extensions as necessary. + AsV1() *EventContextV1 +} + +// EventContext is conical interface for a CloudEvents Context. +type EventContext interface { + // EventContextConverter allows for conversion between versions. + EventContextConverter + + // EventContextReader adds methods for reading context. + EventContextReader + + // EventContextWriter adds methods for writing to context. + EventContextWriter + + // Validate the event based on the specifics of the CloudEvents spec version + // represented by this event context. + Validate() ValidationError + + // Clone clones the event context. + Clone() EventContext + + // String returns a pretty-printed representation of the EventContext. + String() string +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go new file mode 100644 index 000000000..c511c81c4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -0,0 +1,329 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/json" + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const ( + // CloudEventsVersionV03 represents the version 0.3 of the CloudEvents spec. + CloudEventsVersionV03 = "0.3" +) + +var specV03Attributes = map[string]struct{}{ + "type": {}, + "source": {}, + "subject": {}, + "id": {}, + "time": {}, + "schemaurl": {}, + "datacontenttype": {}, + "datacontentencoding": {}, +} + +// EventContextV03 represents the non-data attributes of a CloudEvents v0.3 +// event. +type EventContextV03 struct { + // Type - The type of the occurrence which has happened. + Type string `json:"type"` + // Source - A URI describing the event producer. + Source types.URIRef `json:"source"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). 
+ Subject *string `json:"subject,omitempty"` + // ID of the event; must be non-empty and unique within the scope of the producer. + ID string `json:"id"` + // Time - A Timestamp when the event happened. + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + SchemaURL *types.URIRef `json:"schemaurl,omitempty"` + // GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`. + DataContentType *string `json:"datacontenttype,omitempty"` + // DeprecatedDataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`. + DataContentEncoding *string `json:"datacontentencoding,omitempty"` + // Extensions - Additional extension metadata beyond the base spec. + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV03)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error { + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Try to unmarshal extension if we find it as a RawMessage. + switch v := value.(type) { + case json.RawMessage: + if err := json.Unmarshal(v, obj); err == nil { + // if that worked, return with obj set. + return nil + } + } + // else try as a string ptr. + + // Only support *string for now. + switch v := obj.(type) { + case *string: + if valueAsString, ok := value.(string); ok { + *v = valueAsString + return nil + } else { + return fmt.Errorf("invalid type for extension %q", name) + } + default: + return fmt.Errorf("unknown extension type %T", obj) + } +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name uses a reserved event context key. 
+func (ec *EventContextV03) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV03) Clone() EventContext { + ec03 := ec.AsV03() + ec03.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec03.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.SchemaURL != nil { + ec03.SchemaURL = types.Clone(ec.SchemaURL).(*types.URIRef) + } + ec03.Extensions = ec.cloneExtensions() + return ec03 +} + +func (ec *EventContextV03) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV03) AsV03() *EventContextV03 { + return &ec +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV03) AsV1() *EventContextV1 { + ret := EventContextV1{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + if ec.SchemaURL != nil { + ret.DataSchema = &types.URI{URL: ec.SchemaURL.URL} + } + + // DataContentEncoding was removed in 1.0, so put it in an extension for 1.0. + if ec.DataContentEncoding != nil { + _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/master/spec.md +// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e +// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding +// + https://github.com/cloudevents/spec/pull/406 -> subject +func (ec EventContextV03) Validate() ValidationError { + errors := map[string]error{} + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. + if schemaURL == "" { + errors["schemaurl"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986") + } + } + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } + } + } + + // datacontentencoding + // Type: String per RFC 2045 Section 6.1 + // Constraints: + // The attribute MUST be set if the data attribute contains string-encoded binary data. + // Otherwise the attribute MUST NOT be set. + // If present, MUST adhere to RFC 2045 Section 6.1 + if ec.DataContentEncoding != nil { + dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) + if dataContentEncoding != Base64 { + errors["datacontentencoding"] = fmt.Errorf("if present, MUST adhere to RFC 2045 Section 6.1") + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV03) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV03 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + if ec.DataContentEncoding != nil { + b.WriteString(" datacontentencoding: " + *ec.DataContentEncoding + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go new file mode 100644 index 000000000..2cd27a705 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -0,0 +1,99 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV03) GetSpecVersion() string { + return CloudEventsVersionV03 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV03) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV03) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV03) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV03) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV03) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV03) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV03) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV03) GetDataSchema() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV03) DeprecatedGetDataContentEncoding() string { + if ec.DataContentEncoding != nil { + return *ec.DataContentEncoding + } + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec 
EventContextV03) GetExtensions() map[string]interface{} { + return ec.Extensions +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV03) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go new file mode 100644 index 000000000..5d664635e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV03)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV03) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV03) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV03) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV03) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV03) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV03) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV03) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.SchemaURL = &types.URIRef{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV03) DeprecatedSetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + ec.DataContentEncoding = nil + } else { + ec.DataContentEncoding = &e + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go new file mode 100644 index 000000000..8f164502b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -0,0 +1,315 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// WIP: AS OF SEP 20, 2019 + +const ( + // CloudEventsVersionV1 represents the version 1.0 of 
the CloudEvents spec. + CloudEventsVersionV1 = "1.0" +) + +var specV1Attributes = map[string]struct{}{ + "id": {}, + "source": {}, + "type": {}, + "datacontenttype": {}, + "subject": {}, + "time": {}, + "specversion": {}, + "dataschema": {}, +} + +// EventContextV1 represents the non-data attributes of a CloudEvents v1.0 +// event. +type EventContextV1 struct { + // ID of the event; must be non-empty and unique within the scope of the producer. + // +required + ID string `json:"id"` + // Source - A URI describing the event producer. + // +required + Source types.URIRef `json:"source"` + // Type - The type of the occurrence which has happened. + // +required + Type string `json:"type"` + + // DataContentType - A MIME (RFC2046) string describing the media type of `data`. + // +optional + DataContentType *string `json:"datacontenttype,omitempty"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). + // +optional + Subject *string `json:"subject,omitempty"` + // Time - A Timestamp when the event happened. + // +optional + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + // +optional + DataSchema *types.URI `json:"dataschema,omitempty"` + + // Extensions - Additional extension metadata beyond the base spec. + // +optional + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV1)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { + name = strings.ToLower(name) + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Only support *string for now. + if v, ok := obj.(*string); ok { + if *v, ok = value.(string); ok { + return nil + } + } + return fmt.Errorf("unknown extension type %T", obj) +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name doesn't respect the regex +// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key. 
+func (ec *EventContextV1) SetExtension(name string, value interface{}) error { + if err := validateExtensionName(name); err != nil { + return err + } + + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + name = strings.ToLower(name) + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) // Ensure it's a legal CE attribute value + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV1) Clone() EventContext { + ec1 := ec.AsV1() + ec1.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec1.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.DataSchema != nil { + ec1.DataSchema = types.Clone(ec.DataSchema).(*types.URI) + } + ec1.Extensions = ec.cloneExtensions() + return ec1 +} + +func (ec *EventContextV1) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV1) AsV03() *EventContextV03 { + ret := EventContextV03{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + + if ec.DataSchema != nil { + ret.SchemaURL = &types.URIRef{URL: ec.DataSchema.URL} + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + // DeprecatedDataContentEncoding was introduced in 0.3, removed in 1.0 + if strings.EqualFold(k, DataContentEncodingKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.DataContentEncoding = &etv + } + continue + } + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV1) AsV1() *EventContextV1 { + return &ec +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v1.0/spec.md. +func (ec EventContextV1) Validate() ValidationError { + errors := map[string]error{} + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + // no way to test "MUST be unique within the scope of the producer" + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + // MUST be a non-empty URI-reference + // An absolute URI is RECOMMENDED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // The following attributes are optional but still have validation. + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("failed to parse RFC 2046 media type %w", err) + } + } + } + + // dataschema + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.DataSchema != nil { + if !ec.DataSchema.Validate() { + errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986, Section 4.3. Absolute URI") + } + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. +func (ec EventContextV1) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV1 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.DataSchema != nil { + b.WriteString(" dataschema: " + ec.DataSchema.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go new file mode 100644 index 000000000..74f73b029 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -0,0 +1,104 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV1) GetSpecVersion() string { + return CloudEventsVersionV1 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV1) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + 
+// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV1) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV1) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV1) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV1) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV1) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV1) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV1) GetDataSchema() string { + if ec.DataSchema != nil { + return ec.DataSchema.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV1) DeprecatedGetDataContentEncoding() string { + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec EventContextV1) GetExtensions() map[string]interface{} { + if len(ec.Extensions) == 0 { + return nil + } + // For now, convert the extensions of v1.0 to the pre-v1.0 style. + ext := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range ec.Extensions { + ext[k] = v + } + return ext +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV1) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go new file mode 100644 index 000000000..5f2aca763 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -0,0 +1,97 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV1)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV1) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV1) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV1) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV1) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// 
SetID implements EventContextWriter.SetID +func (ec *EventContextV1) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV1) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV1) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.DataSchema = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.DataSchema = &types.URI{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV1) DeprecatedSetDataContentEncoding(e string) error { + return errors.New("deprecated: SetDataContentEncoding is not supported in v1.0 of CloudEvents") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go new file mode 100644 index 000000000..72d0e757a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // DataContentEncodingKey is the key to DeprecatedDataContentEncoding for versions that do not support data content encoding + // directly. + DataContentEncodingKey = "datacontentencoding" +) + +var ( + // This determines the behavior of validateExtensionName(). For MaxExtensionNameLength > 0, an error will be returned, + // if len(key) > MaxExtensionNameLength + MaxExtensionNameLength = 0 +) + +func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { + lkey := strings.ToLower(key) + for k, v := range space { + if strings.EqualFold(lkey, strings.ToLower(k)) { + return v, true + } + } + return nil, false +} + +func IsExtensionNameValid(key string) bool { + if err := validateExtensionName(key); err != nil { + return false + } + return true +} + +func validateExtensionName(key string) error { + if len(key) < 1 { + return errors.New("bad key, CloudEvents attribute names MUST NOT be empty") + } + if MaxExtensionNameLength > 0 && len(key) > MaxExtensionNameLength { + return fmt.Errorf("bad key, CloudEvents attribute name '%s' is longer than %d characters", key, MaxExtensionNameLength) + } + + for _, c := range key { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go new file mode 100644 index 000000000..f826a1841 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -0,0 +1,26 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package protocol defines interfaces to decouple the client package +from protocol implementations. + +Most event sender and receiver applications should not use this +package, they should use the client package. 
This package is for +infrastructure developers implementing new transports, or intermediary +components like importers, channels or brokers. + +Available protocols: + +* HTTP (using net/http) +* Kafka (using github.com/Shopify/sarama) +* AMQP (using pack.ag/amqp) +* Go Channels +* Nats +* Nats Streaming (stan) +* Google PubSub + +*/ +package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go new file mode 100644 index 000000000..a3f335261 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import "fmt" + +// ErrTransportMessageConversion is an error produced when the transport +// message can not be converted. +type ErrTransportMessageConversion struct { + fatal bool + handled bool + transport string + message string +} + +// NewErrTransportMessageConversion makes a new ErrTransportMessageConversion. +func NewErrTransportMessageConversion(transport, message string, handled, fatal bool) *ErrTransportMessageConversion { + return &ErrTransportMessageConversion{ + transport: transport, + message: message, + handled: handled, + fatal: fatal, + } +} + +// IsFatal reports if this error should be considered fatal. +func (e *ErrTransportMessageConversion) IsFatal() bool { + return e.fatal +} + +// Handled reports if this error should be considered accepted and no further action. +func (e *ErrTransportMessageConversion) Handled() bool { + return e.handled +} + +// Error implements error.Error +func (e *ErrTransportMessageConversion) Error() string { + return fmt.Sprintf("transport %s failed to convert message: %s", e.transport, e.message) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go new file mode 100644 index 000000000..48f03fb6c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" + "time" +) + +type WebhookConfig struct { + AllowedMethods []string // defaults to POST + AllowedRate *int + AutoACKCallback bool + AllowedOrigins []string +} + +const ( + DefaultAllowedRate = 1000 + DefaultTimeout = time.Second * 600 +) + +// TODO: implement rate limiting. +// Throttling is indicated by requests being rejected using HTTP status code 429 Too Many Requests. +// TODO: use this if Webhook Request Origin has been turned on. +// Inbound requests should be rejected if Allowed Origins is required by SDK. + +func (p *Protocol) OptionsHandler(rw http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodOptions || p.WebhookConfig == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + + headers := make(http.Header) + + // The spec does not say we need to validate the origin, just the request origin. + // After the handshake, we will validate the origin. 
+ if origin, ok := p.ValidateRequestOrigin(req); !ok { + rw.WriteHeader(http.StatusBadRequest) + return + } else { + headers.Set("WebHook-Allowed-Origin", origin) + } + + allowedRateRequired := false + if _, ok := req.Header[http.CanonicalHeaderKey("WebHook-Request-Rate")]; ok { + // must send WebHook-Allowed-Rate + allowedRateRequired = true + } + + if p.WebhookConfig.AllowedRate != nil { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(*p.WebhookConfig.AllowedRate)) + } else if allowedRateRequired { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(DefaultAllowedRate)) + } + + if len(p.WebhookConfig.AllowedMethods) > 0 { + headers.Set("Allow", strings.Join(p.WebhookConfig.AllowedMethods, ", ")) + } else { + headers.Set("Allow", http.MethodPost) + } + + cb := req.Header.Get("WebHook-Request-Callback") + if cb != "" { + if p.WebhookConfig.AutoACKCallback { + go func() { + reqAck, err := http.NewRequest(http.MethodPost, cb, nil) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to create http request attempting to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + + // Write out the headers. + for k := range headers { + reqAck.Header.Set(k, headers.Get(k)) + } + + _, err = http.DefaultClient.Do(reqAck) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + }() + return + } else { + cecontext.LoggerFrom(req.Context()).Infof("ACTION REQUIRED: Please validate web hook request callback: %q", cb) + // TODO: what to do pending https://github.com/cloudevents/spec/issues/617 + return + } + } + + // Write out the headers. + for k := range headers { + rw.Header().Set(k, headers.Get(k)) + } +} + +func (p *Protocol) ValidateRequestOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("WebHook-Request-Origin")) +} + +func (p *Protocol) ValidateOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("Origin")) +} + +func (p *Protocol) validateOrigin(ro string) (string, bool) { + cecontext.LoggerFrom(context.TODO()).Infow("Validating origin.", zap.String("origin", ro)) + + for _, ao := range p.WebhookConfig.AllowedOrigins { + if ao == "*" { + return ao, true + } + // TODO: it is not clear what the rules for allowed hosts are. + // Need to find docs for this. For now, test for prefix. + if strings.HasPrefix(ro, ao) { + return ao, true + } + } + + return ro, false +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 000000000..0eec396a1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. 
+func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. +// If not set nil is returned. +func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go new file mode 100644 index 000000000..3428ea387 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package http implements an HTTP binding using net/http module +*/ +package http diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go new file mode 100644 index 000000000..055a5c4dd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -0,0 +1,55 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +var attributeHeadersMapping map[string]string + +type customHeaderKey int + +const ( + headerKey customHeaderKey = iota +) + +func init() { + attributeHeadersMapping = make(map[string]string) + for _, v := range specs.Versions() { + for _, a := range v.Attributes() { + if a.Kind() == spec.DataContentType { + attributeHeadersMapping[a.Name()] = ContentType + } else { + attributeHeadersMapping[a.Name()] = textproto.CanonicalMIMEHeaderKey(prefix + a.Name()) + } + } + } +} + +func extNameToHeaderName(name string) string { + var b strings.Builder + b.Grow(len(name) + len(prefix)) + b.WriteString(prefix) + b.WriteRune(unicode.ToUpper(rune(name[0]))) + b.WriteString(name[1:]) + return b.String() +} + +func HeaderFrom(ctx context.Context) http.Header { + return binding.GetOrDefaultFromCtx(ctx, headerKey, make(http.Header)).(http.Header) +} + +func WithCustomHeader(ctx context.Context, header http.Header) context.Context { + return context.WithValue(ctx, headerKey, header) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go new file mode 100644 index 000000000..7a7c36f9b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -0,0 +1,175 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "io" + nethttp "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +const prefix = "Ce-" + +var specs = spec.WithPrefixMatchExact( + func(s string) string { + if s == "datacontenttype" { + return "Content-Type" + } else { + return textproto.CanonicalMIMEHeaderKey("Ce-" + s) + } + }, + "Ce-", +) + +const ContentType = "Content-Type" +const ContentLength = "Content-Length" + +// Message holds the Header and Body of a 
HTTP Request or Response. +// The Message instance *must* be constructed from NewMessage function. +// This message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +type Message struct { + Header nethttp.Header + BodyReader io.ReadCloser + OnFinish func(error) error + + ctx context.Context + + format format.Format + version spec.Version +} + +// Check if http.Message implements binding.Message +var _ binding.Message = (*Message)(nil) +var _ binding.MessageContext = (*Message)(nil) +var _ binding.MessageMetadataReader = (*Message)(nil) + +// NewMessage returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessage(header nethttp.Header, body io.ReadCloser) *Message { + m := Message{Header: header} + if body != nil { + m.BodyReader = body + } + if m.format = format.Lookup(header.Get(ContentType)); m.format == nil { + m.version = specs.Version(m.Header.Get(specs.PrefixedSpecVersionName())) + } + return &m +} + +// NewMessageFromHttpRequest returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpRequest(req *nethttp.Request) *Message { + if req == nil { + return nil + } + message := NewMessage(req.Header, req.Body) + message.ctx = req.Context() + return message +} + +// NewMessageFromHttpResponse returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpResponse(resp *nethttp.Response) *Message { + if resp == nil { + return nil + } + msg := NewMessage(resp.Header, resp.Body) + return msg +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } else { + return encoder.SetStructuredEvent(ctx, m.format, m.BodyReader) + } +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.version == nil { + return binding.ErrNotBinary + } + + for k, v := range m.Header { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, v[0]) + } else if strings.HasPrefix(k, prefix) { + // Trim Prefix + To lower + var b strings.Builder + b.Grow(len(k) - len(prefix)) + b.WriteRune(unicode.ToLower(rune(k[len(prefix)]))) + b.WriteString(k[len(prefix)+1:]) + err = encoder.SetExtension(b.String(), v[0]) + } + if err != nil { + return err + } + } + + if m.BodyReader != nil { + err = encoder.SetData(m.BodyReader) + if err != nil { + return err + } + } + + return +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + h := m.Header[attributeHeadersMapping[attr.Name()]] + if h != nil { + return attr, h[0] + } + return attr, nil + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + h := m.Header[extNameToHeaderName(name)] + if h != nil { + return 
h[0] + } + return nil +} + +func (m *Message) Context() context.Context { + return m.ctx +} + +func (m *Message) Finish(err error) error { + if m.BodyReader != nil { + _ = m.BodyReader.Close() + } + if m.OnFinish != nil { + return m.OnFinish(err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go new file mode 100644 index 000000000..5e400905a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -0,0 +1,301 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Protocol) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. +func WithTarget(targetUrl string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http target option can not set nil protocol") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + p.Target = target + + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + p.RequestTemplate.URL = target + + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. +func WithHeader(key, value string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http header option can not set nil protocol") + } + key = strings.TrimSpace(key) + if key != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + if p.RequestTemplate.Header == nil { + p.RequestTemplate.Header = nethttp.Header{} + } + p.RequestTemplate.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http shutdown timeout option can not set nil protocol") + } + p.ShutdownTimeout = timeout + return nil + } +} + +func checkListen(p *Protocol, prefix string) error { + switch { + case p.listener.Load() != nil: + return fmt.Errorf("error setting %v: listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithPort(port int) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http port option can not set nil protocol") + } + if port < 0 || port > 65535 { + return fmt.Errorf("http port option was given an invalid port: %d", port) + } + if err := checkListen(p, "http port option"); err != nil { + return err + } + p.Port = port + return nil + } +} + +// WithListener sets the listener for StartReceiver. +// Only one of WithListener or WithPort is allowed. 
+func WithListener(l net.Listener) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http listener option can not set nil protocol") + } + if err := checkListen(p, "http listener"); err != nil { + return err + } + p.listener.Store(l) + return nil + } +} + +// WithPath sets the path to receive cloudevents on for HTTP transports. +func WithPath(path string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http path option can not set nil protocol") + } + path = strings.TrimSpace(path) + if len(path) == 0 { + return fmt.Errorf("http path option was given an invalid path: %q", path) + } + p.Path = path + return nil + } +} + +// WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use +// when using an HTTP request. +func WithMethod(method string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http method option can not set nil protocol") + } + method = strings.TrimSpace(method) + if method != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{} + } + p.RequestTemplate.Method = method + return nil + } + return fmt.Errorf("http method option was empty string") + } +} + +// +// Middleware is a function that takes an existing http.Handler and wraps it in middleware, +// returning the wrapped http.Handler. +type Middleware func(next nethttp.Handler) nethttp.Handler + +// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. +// Middleware is applied to everything before it. For example +// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. +func WithMiddleware(middleware Middleware) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http middleware option can not set nil protocol") + } + p.middleware = append(p.middleware, middleware) + return nil + } +} + +// WithRoundTripper sets the HTTP RoundTripper. +func WithRoundTripper(roundTripper nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + p.roundTripper = roundTripper + return nil + } +} + +// WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen. 
+func WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + if p.roundTripper == nil { + if p.Client == nil { + p.roundTripper = nethttp.DefaultTransport + } else { + p.roundTripper = p.Client.Transport + } + } + p.roundTripper = decorator(p.roundTripper) + return nil + } +} + +// WithClient sets the protocol client +func WithClient(client nethttp.Client) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("client option can not set nil protocol") + } + p.Client = &client + return nil + } +} + +// WithGetHandlerFunc sets the http GET handler func +func WithGetHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http GET handler func can not set nil protocol") + } + p.GetHandlerFn = fn + return nil + } +} + +// WithOptionsHandlerFunc sets the http OPTIONS handler func +func WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = fn + return nil + } +} + +// WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options. +// methods: the supported methods reported to OPTIONS caller. +// rate: the rate limit reported to OPTIONS caller. +// origins: the prefix of the accepted origins, or "*". +// callback: preform the callback to ACK the OPTIONS request. +func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = p.OptionsHandler + p.WebhookConfig = &WebhookConfig{ + AllowedMethods: methods, + AllowedRate: &rate, + AllowedOrigins: origins, + AutoACKCallback: callback, + } + return nil + } +} + +// IsRetriable is a custom function that can be used to override the +// default retriable status codes. +type IsRetriable func(statusCode int) bool + +// WithIsRetriableFunc sets the function that gets called to determine if an +// error should be retried. If not set, the defaultIsRetriableFunc is used. +func WithIsRetriableFunc(isRetriable IsRetriable) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("isRetriable handler func can not set nil protocol") + } + if isRetriable == nil { + return fmt.Errorf("isRetriable handler can not be nil") + } + p.isRetriableFunc = isRetriable + return nil + } +} + +func WithRateLimiter(rl RateLimiter) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.limiter = rl + return nil + } +} + +// WithRequestDataAtContextMiddleware adds to the Context RequestData. +// This enables a user's dispatch handler to inspect HTTP request information by +// retrieving it from the Context. 
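For orientation, the Option constructors vendored above are meant to be composed through the protocol constructor that appears later in this patch. The following is a minimal receiver-side sketch; the port, path, and timeout values are arbitrary examples rather than anything taken from this change.

package main

import (
	"log"
	"time"

	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	// Compose receiver-side options; every constructor used here is defined in
	// the vendored options.go. The concrete values are illustrative only.
	p, err := cehttp.New(
		cehttp.WithPort(8181),
		cehttp.WithPath("/events"),
		cehttp.WithShutdownTimeout(10*time.Second),
		cehttp.WithRequestDataAtContextMiddleware(),
	)
	if err != nil {
		log.Fatalf("invalid protocol options: %v", err)
	}
	_ = p // p would then be opened with OpenInbound or handed to a CloudEvents client.
}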
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go new file mode 100644 index 000000000..dba6fd7ba --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -0,0 +1,408 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 +) + +type msgErr struct { + msg *Message + respFn protocol.ResponseFn + err error +} + +// Default error codes that we retry on - string isn't used, it's just there so +// people know what each error code's title is. +// To modify this use Option +var defaultRetriableErrors = map[int]string{ + 404: "Not Found", + 413: "Payload Too Large", + 425: "Too Early", + 429: "Too Many Requests", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", +} + +// Protocol acts as both a http client and a http handler. +type Protocol struct { + Target *url.URL + RequestTemplate *http.Request + Client *http.Client + incoming chan msgErr + + // OptionsHandlerFn handles the OPTIONS method requests and is intended to + // implement the abuse protection spec: + // https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection + OptionsHandlerFn http.HandlerFunc + WebhookConfig *WebhookConfig + + GetHandlerFn http.HandlerFunc + DeleteHandlerFn http.HandlerFunc + + // To support Opener: + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If 0, DefaultShutdownTimeout is used. + ShutdownTimeout time.Duration + + // Port is the port configured to bind the receiver to. Defaults to 8080. + // If you want to know the effective port you're listening to, use GetListeningPort() + Port int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + + // Receive Mutex + reMu sync.Mutex + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Protocol will create a one. 
+ Handler *http.ServeMux + + listener atomic.Value + roundTripper http.RoundTripper + server *http.Server + handlerRegistered bool + middleware []Middleware + limiter RateLimiter + + isRetriableFunc IsRetriable +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + incoming: make(chan msgErr), + Port: -1, + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Client == nil { + p.Client = http.DefaultClient + } + + if p.roundTripper != nil { + p.Client.Transport = p.roundTripper + } + + if p.ShutdownTimeout == 0 { + p.ShutdownTimeout = DefaultShutdownTimeout + } + + if p.isRetriableFunc == nil { + p.isRetriableFunc = defaultIsRetriableFunc + } + + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + + return p, nil +} + +// NewObserved creates an HTTP protocol with trace propagating middleware. +// Deprecated: now this behaves like New and it will be removed in future releases, +// setup the http observed protocol using the opencensus separate module NewObservedHttp +var NewObserved = New + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +// Send implements binding.Sender +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if ctx == nil { + return fmt.Errorf("nil Context") + } else if m == nil { + return fmt.Errorf("nil Message") + } + + msg, err := p.Request(ctx, m, transformers...) + if msg != nil { + defer func() { _ = msg.Finish(err) }() + } + if err != nil && !protocol.IsACK(err) { + var res *Result + if protocol.ResultAs(err, &res) { + if message, ok := msg.(*Message); ok { + buf := new(bytes.Buffer) + buf.ReadFrom(message.BodyReader) + errorStr := buf.String() + // If the error is not wrapped, then append the original error string. + if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } + } + } + } + return err +} + +// Request implements binding.Requester +func (p *Protocol) Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } else if m == nil { + return nil, fmt.Errorf("nil Message") + } + + var err error + defer func() { _ = m.Finish(err) }() + + req := p.makeRequest(ctx) + + if p.Client == nil || req == nil || req.URL == nil { + return nil, fmt.Errorf("not initialized: %#v", p) + } + + if err = WriteRequest(ctx, m, req, transformers...); err != nil { + return nil, err + } + + return p.do(ctx, req) +} + +func (p *Protocol) makeRequest(ctx context.Context) *http.Request { + req := &http.Request{ + Method: http.MethodPost, + Header: HeaderFrom(ctx), + } + + if p.RequestTemplate != nil { + req.Method = p.RequestTemplate.Method + req.URL = p.RequestTemplate.URL + req.Close = p.RequestTemplate.Close + req.Host = p.RequestTemplate.Host + copyHeadersEnsure(p.RequestTemplate.Header, &req.Header) + } + + if p.Target != nil { + req.URL = p.Target + } + + // Override the default request with target from context. 
+ if target := cecontext.TargetFrom(ctx); target != nil { + req.URL = target + } + return req.WithContext(ctx) +} + +// Ensure to is a non-nil map before copying +func copyHeadersEnsure(from http.Header, to *http.Header) { + if len(from) > 0 { + if *to == nil { + *to = http.Header{} + } + copyHeaders(from, *to) + } +} + +func copyHeaders(from, to http.Header) { + if from == nil || to == nil { + return + } + for header, values := range from { + for _, value := range values { + to.Add(header, value) + } + } +} + +// Receive the next incoming HTTP request as a CloudEvent. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } + + msg, fn, err := p.Respond(ctx) + // No-op the response when finish is invoked. + if msg != nil { + return binding.WithFinish(msg, func(err error) { + if fn != nil { + _ = fn(ctx, nil, nil) + } + }), err + } else { + return nil, err + } +} + +// Respond receives the next incoming HTTP request as a CloudEvent and waits +// for the response callback to invoked before continuing. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.ResponseFn, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("nil Context") + } + + select { + case in, ok := <-p.incoming: + if !ok { + return nil, nil, io.EOF + } + + if in.msg == nil { + return nil, in.respFn, in.err + } + return in.msg, in.respFn, in.err + + case <-ctx.Done(): + return nil, nil, io.EOF + } +} + +// ServeHTTP implements http.Handler. +// Blocks until ResponseFn is invoked. +func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // always apply limiter first using req context + ok, reset, err := p.limiter.Allow(req.Context(), req) + if err != nil { + p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)} + rw.WriteHeader(http.StatusInternalServerError) + return + } + + if !ok { + rw.Header().Add("Retry-After", strconv.Itoa(int(reset))) + http.Error(rw, "limit exceeded", 429) + return + } + + // Filter the GET style methods: + switch req.Method { + case http.MethodOptions: + if p.OptionsHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.OptionsHandlerFn(rw, req) + return + + case http.MethodGet: + if p.GetHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.GetHandlerFn(rw, req) + return + + case http.MethodDelete: + if p.DeleteHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.DeleteHandlerFn(rw, req) + return + } + + m := NewMessageFromHttpRequest(req) + if m == nil { + // Should never get here unless ServeHTTP is called directly. + p.incoming <- msgErr{msg: nil, err: binding.ErrUnknownEncoding} + rw.WriteHeader(http.StatusBadRequest) + return // if there was no message, return. 
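As a rough sketch of the outbound Send path defined above (the target URL and event attributes are placeholders, not values from this patch):

package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/event"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	// Any CloudEvents-aware HTTP endpoint can stand in for this placeholder URL.
	p, err := cehttp.New(cehttp.WithTarget("http://localhost:8080/"))
	if err != nil {
		log.Fatal(err)
	}

	e := event.New()
	e.SetID("0001")
	e.SetType("com.example.sample")
	e.SetSource("example/source")

	// Send wraps the event as a binding.EventMessage and POSTs it to the target.
	if res := p.Send(context.Background(), (*binding.EventMessage)(&e)); res != nil {
		log.Printf("send completed with result: %v", res)
	}
}

Note that Send reports a protocol.Result even on success; protocol.IsACK, vendored further below, is the intended way to tell acknowledgements apart from delivery failures.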
+ } + + var finishErr error + m.OnFinish = func(err error) error { + finishErr = err + return nil + } + + wg := sync.WaitGroup{} + wg.Add(1) + var fn protocol.ResponseFn = func(ctx context.Context, respMsg binding.Message, res protocol.Result, transformers ...binding.Transformer) error { + // Unblock the ServeHTTP after the reply is written + defer func() { + wg.Done() + }() + + if finishErr != nil { + http.Error(rw, fmt.Sprintf("Cannot forward CloudEvent: %s", finishErr), http.StatusInternalServerError) + return finishErr + } + + status := http.StatusOK + var errMsg string + if res != nil { + var result *Result + switch { + case protocol.ResultAs(res, &result): + if result.StatusCode > 100 && result.StatusCode < 600 { + status = result.StatusCode + } + errMsg = fmt.Errorf(result.Format, result.Args...).Error() + case !protocol.IsACK(res): + // Map client errors to http status code + validationError := event.ValidationError{} + if errors.As(res, &validationError) { + status = http.StatusBadRequest + rw.Header().Set("content-type", "text/plain") + rw.WriteHeader(status) + _, _ = rw.Write([]byte(validationError.Error())) + return validationError + } else if errors.Is(res, binding.ErrUnknownEncoding) { + status = http.StatusUnsupportedMediaType + } else { + status = http.StatusInternalServerError + } + } + } + + if respMsg != nil { + err := WriteResponseWriter(ctx, respMsg, status, rw, transformers...) + return respMsg.Finish(err) + } + + rw.WriteHeader(status) + if _, err := rw.Write([]byte(errMsg)); err != nil { + return err + } + return nil + } + + p.incoming <- msgErr{msg: m, respFn: fn} // Send to Request + // Block until ResponseFn is invoked + wg.Wait() +} + +func defaultIsRetriableFunc(sc int) bool { + _, ok := defaultRetriableErrors[sc] + return ok +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go new file mode 100644 index 000000000..04ef96915 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -0,0 +1,143 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "fmt" + "net" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +var _ protocol.Opener = (*Protocol)(nil) + +func (p *Protocol) OpenInbound(ctx context.Context) error { + p.reMu.Lock() + defer p.reMu.Unlock() + + if p.Handler == nil { + p.Handler = http.NewServeMux() + } + + if !p.handlerRegistered { + // handler.Handle might panic if the user tries to use the same path as the sdk. + p.Handler.Handle(p.GetPath(), p) + p.handlerRegistered = true + } + + // After listener is invok + listener, err := p.listen() + if err != nil { + return err + } + + p.server = &http.Server{ + Addr: listener.Addr().String(), + Handler: attachMiddleware(p.Handler, p.middleware), + ReadTimeout: DefaultTimeout, + WriteTimeout: DefaultTimeout, + } + + // Shutdown + defer func() { + _ = p.server.Close() + p.server = nil + }() + + errChan := make(chan error) + go func() { + errChan <- p.server.Serve(listener) + }() + + // wait for the server to return or ctx.Done(). + select { + case <-ctx.Done(): + // Try a graceful shutdown. 
+ ctx, cancel := context.WithTimeout(context.Background(), p.ShutdownTimeout) + defer cancel() + + shdwnErr := p.server.Shutdown(ctx) + if shdwnErr != nil { + shdwnErr = fmt.Errorf("shutting down HTTP server: %w", shdwnErr) + } + + // Wait for server goroutine to exit + rntmErr := <-errChan + if rntmErr != nil && rntmErr != http.ErrServerClosed { + rntmErr = fmt.Errorf("server failed during shutdown: %w", rntmErr) + + if shdwnErr != nil { + return fmt.Errorf("combined error during shutdown of HTTP server: %w, %v", + shdwnErr, rntmErr) + } + + return rntmErr + } + + return shdwnErr + + case err := <-errChan: + if err != nil { + return fmt.Errorf("during runtime of HTTP server: %w", err) + } + return nil + } +} + +// GetListeningPort returns the listening port. +// Returns -1 if it's not listening. +func (p *Protocol) GetListeningPort() int { + if listener := p.listener.Load(); listener != nil { + if tcpAddr, ok := listener.(net.Listener).Addr().(*net.TCPAddr); ok { + return tcpAddr.Port + } + } + return -1 +} + +// listen if not already listening, update t.Port +func (p *Protocol) listen() (net.Listener, error) { + if p.listener.Load() == nil { + port := 8080 + if p.Port != -1 { + port = p.Port + if port < 0 || port > 65535 { + return nil, fmt.Errorf("invalid port %d", port) + } + } + var err error + var listener net.Listener + if listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { + return nil, err + } + p.listener.Store(listener) + return listener, nil + } + return p.listener.Load().(net.Listener), nil +} + +// GetPath returns the path the transport is hosted on. If the path is '/', +// the transport will handle requests on any URI. To discover the true path +// a request was received on, inspect the context from Receive(cxt, ...) with +// TransportContextFrom(ctx). +func (p *Protocol) GetPath() string { + path := strings.TrimSpace(p.Path) + if len(path) > 0 { + return path + } + return "/" // default +} + +// attachMiddleware attaches the HTTP middleware to the specified handler. +func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { + for _, m := range middleware { + h = m(h) + } + return h +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go new file mode 100644 index 000000000..9c4c10a29 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go @@ -0,0 +1,34 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "net/http" +) + +type RateLimiter interface { + // Allow attempts to take one token from the rate limiter for the specified + // request. It returns ok when this operation was successful. In case ok is + // false, reset will indicate the time in seconds when it is safe to perform + // another attempt. An error is returned when this operation failed, e.g. due to + // a backend error. + Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error) + // Close terminates rate limiter and cleans up any data structures or + // connections that may remain open. After a store is stopped, Take() should + // always return zero values. 
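The RateLimiter contract defined here can be satisfied by user code and wired in through the WithRateLimiter option above. A deliberately simple sketch follows, assuming a fixed lifetime cap on accepted requests; the type name and cap value are illustrative only.

package main

import (
	"context"
	"log"
	"net/http"
	"sync/atomic"

	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// capLimiter admits at most max requests over the lifetime of the receiver.
// It illustrates the RateLimiter interface; it is not a production strategy.
type capLimiter struct {
	max   int64
	count int64
}

func (c *capLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) {
	if atomic.AddInt64(&c.count, 1) > c.max {
		// Ask the caller to retry after one second.
		return false, 1, nil
	}
	return true, 0, nil
}

func (c *capLimiter) Close(ctx context.Context) error { return nil }

func main() {
	p, err := cehttp.New(cehttp.WithRateLimiter(&capLimiter{max: 100}))
	if err != nil {
		log.Fatal(err)
	}
	_ = p // requests beyond the cap are answered with 429 and a Retry-After header by ServeHTTP.
}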
+ Close(ctx context.Context) error +} + +type noOpLimiter struct{} + +func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) { + return true, 0, nil +} + +func (n noOpLimiter) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go new file mode 100644 index 000000000..71e7346f3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -0,0 +1,145 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +func (p *Protocol) do(ctx context.Context, req *http.Request) (binding.Message, error) { + params := cecontext.RetriesFrom(ctx) + + switch params.Strategy { + case cecontext.BackoffStrategyConstant, cecontext.BackoffStrategyLinear, cecontext.BackoffStrategyExponential: + return p.doWithRetry(ctx, params, req) + case cecontext.BackoffStrategyNone: + fallthrough + default: + return p.doOnce(req) + } +} + +func (p *Protocol) doOnce(req *http.Request) (binding.Message, protocol.Result) { + resp, err := p.Client.Do(req) + if err != nil { + return nil, protocol.NewReceipt(false, "%w", err) + } + + var result protocol.Result + if resp.StatusCode/100 == 2 { + result = protocol.ResultACK + } else { + result = protocol.ResultNACK + } + + return NewMessage(resp.Header, resp.Body), NewResult(resp.StatusCode, "%w", result) +} + +func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParams, req *http.Request) (binding.Message, error) { + then := time.Now() + retry := 0 + results := make([]protocol.Result, 0) + + var ( + body []byte + err error + ) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + + for { + msg, result := p.doOnce(req) + + // Fast track common case. + if protocol.IsACK(result) { + return msg, NewRetriesResult(result, retry, then, results) + } + + // Try again? + // + // Make sure the error was something we should retry. + + { + var uErr *url.Error + if errors.As(result, &uErr) { + goto DoBackoff + } + } + + { + var httpResult *Result + if errors.As(result, &httpResult) { + sc := httpResult.StatusCode + if p.isRetriableFunc(sc) { + // retry! + goto DoBackoff + } else { + // Permanent error + cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again", + zap.Error(httpResult), + zap.Int("statusCode", sc)) + return msg, NewRetriesResult(result, retry, then, results) + } + } + } + + DoBackoff: + resetBody(req, body) + + // Wait for the correct amount of backoff time. + + // total tries = retry + 1 + if err := params.Backoff(ctx, retry+1); err != nil { + // do not try again. + cecontext.LoggerFrom(ctx).Debugw("backoff error, will not try again", zap.Error(err)) + return msg, NewRetriesResult(result, retry, then, results) + } + + retry++ + results = append(results, result) + } +} + +// reset body to allow it to be read multiple times, e.g. 
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go new file mode 100644 index 000000000..7a0b2626c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -0,0 +1,60 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "errors" + "fmt" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewResult returns a fully populated http Result that should be used as +// a transport.Result. +func NewResult(statusCode int, messageFmt string, args ...interface{}) protocol.Result { + return &Result{ + StatusCode: statusCode, + Format: messageFmt, + Args: args, + } +} + +// Result wraps the fields required to make adjustments for http Responses. +type Result struct { + StatusCode int + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Result)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Result) Is(target error) bool { + if o, ok := target.(*Result); ok { + return e.StatusCode == o.StatusCode + } + + // Special case for nil == ACK + if o, ok := target.(*protocol.Receipt); ok { + if e == nil && o.ACK { + return true + } + } + + // Allow for wrapped errors. + if e != nil { + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *Result) Error() string { + return fmt.Sprintf("%d: %v", e.StatusCode, fmt.Errorf(e.Format, e.Args...)) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go new file mode 100644 index 000000000..f4046d522 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -0,0 +1,59 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewRetriesResult returns a http RetriesResult that should be used as +// a transport.Result without retries +func NewRetriesResult(result protocol.Result, retries int, startTime time.Time, attempts []protocol.Result) protocol.Result { + rr := &RetriesResult{ + Result: result, + Retries: retries, + Duration: time.Since(startTime), + } + if len(attempts) > 0 { + rr.Attempts = attempts + } + return rr +} + +// RetriesResult wraps the fields required to make adjustments for http Responses. +type RetriesResult struct { + // The last result + protocol.Result + + // Retries is the number of times the request was tried + Retries int + + // Duration records the time spent retrying. Exclude the successful request (if any) + Duration time.Duration + + // Attempts of all failed requests. Exclude last result. + Attempts []protocol.Result +} + +// make sure RetriesResult implements error. +var _ error = (*RetriesResult)(nil) + +// Is returns if the target error is a RetriesResult type checking target. 
+func (e *RetriesResult) Is(target error) bool { + return protocol.ResultIs(e.Result, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *RetriesResult) Error() string { + if e.Retries == 0 { + return e.Result.Error() + } + return fmt.Sprintf("%s (%dx)", e.Result.Error(), e.Retries) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go new file mode 100644 index 000000000..350fc1cf6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "encoding/json" + nethttp "net/http" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// NewEventFromHTTPRequest returns an Event. +func NewEventFromHTTPRequest(req *nethttp.Request) (*event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventFromHTTPResponse returns an Event. +func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. 
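A short sketch of the inbound helper above used from a plain net/http handler; the route and listen address are arbitrary examples.

package main

import (
	"log"
	"net/http"

	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	// Decode each incoming request into a CloudEvent with the vendored helper.
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		e, err := cehttp.NewEventFromHTTPRequest(r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("received event id=%s type=%s", e.ID(), e.Type())
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}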
+func IsHTTPBatch(header nethttp.Header) bool { + return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go new file mode 100644 index 000000000..43ad36180 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteRequest fills the provided httpRequest with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.Transformer) error { + structuredWriter := (*httpRequestWriter)(httpRequest) + binaryWriter := (*httpRequestWriter)(httpRequest) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type httpRequestWriter http.Request + +func (b *httpRequestWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.Header.Set(ContentType, format.MediaType()) + return b.setBody(event) +} + +func (b *httpRequestWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) End(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) SetData(data io.Reader) error { + return b.setBody(data) +} + +// setBody is a cherry-pick of the implementation in http.NewRequestWithContext +func (b *httpRequestWriter) setBody(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + b.Body = rc + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + b.ContentLength = int64(v.Len()) + buf := v.Bytes() + b.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(buf) + return ioutil.NopCloser(r), nil + } + case *bytes.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } + case *strings.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } + default: + // This is where we'd set it to -1 (at least + // if body != NoBody) to mean unknown, but + // that broke people during the Go 1.8 testing + // period. People depend on it being 0 I + // guess. Maybe retry later. See Issue 18117. + } + // For client requests, Request.ContentLength of 0 + // means either actually 0, or unknown. The only way + // to explicitly say that the ContentLength is zero is + // to set the Body to nil. But turns out too much code + // depends on NewRequest returning a non-nil Body, + // so we use a well-known ReadCloser variable instead + // and have the http package also treat that sentinel + // variable to mean explicitly zero. 
+ if b.GetBody != nil && b.ContentLength == 0 { + b.Body = http.NoBody + b.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } + } + } + return nil +} + +func (b *httpRequestWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.Header, mapping) + return nil + } + + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.Header[mapping] = append(b.Header[mapping], s) + return nil +} + +func (b *httpRequestWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.Header, extNameToHeaderName(name)) + return nil + } + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.Header[extNameToHeaderName(name)] = []string{s} + return nil +} + +var _ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go new file mode 100644 index 000000000..41385dab1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "net/http" + "strconv" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteResponseWriter writes out to the the provided httpResponseWriter with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteResponseWriter(ctx context.Context, m binding.Message, status int, rw http.ResponseWriter, transformers ...binding.Transformer) error { + if status < 200 || status >= 600 { + status = http.StatusOK + } + writer := &httpResponseWriter{rw: rw, status: status} + + _, err := binding.Write( + ctx, + m, + writer, + writer, + transformers..., + ) + return err +} + +type httpResponseWriter struct { + rw http.ResponseWriter + status int + body io.Reader +} + +func (b *httpResponseWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.rw.Header().Set(ContentType, format.MediaType()) + b.body = event + return b.finalizeWriter() +} + +func (b *httpResponseWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpResponseWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.rw.Header(), mapping) + } + + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[mapping] = append(b.rw.Header()[mapping], s) + return nil +} + +func (b *httpResponseWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.rw.Header(), extNameToHeaderName(name)) + } + // Http headers, everything is a string! 
+ s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[extNameToHeaderName(name)] = []string{s} + return nil +} + +func (b *httpResponseWriter) SetData(reader io.Reader) error { + b.body = reader + return nil +} + +func (b *httpResponseWriter) finalizeWriter() error { + if b.body != nil { + // Try to figure it out if we have a content-length + contentLength := -1 + switch v := b.body.(type) { + case *bytes.Buffer: + contentLength = v.Len() + case *bytes.Reader: + contentLength = v.Len() + case *strings.Reader: + contentLength = v.Len() + } + + if contentLength != -1 { + b.rw.Header().Add("Content-length", strconv.Itoa(contentLength)) + } + + // Finalize the headers. + b.rw.WriteHeader(b.status) + + // Write body. + _, err := io.Copy(b.rw, b.body) + if err != nil { + return err + } + } else { + // Finalize the headers. + b.rw.WriteHeader(b.status) + } + return nil +} + +func (b *httpResponseWriter) End(ctx context.Context) error { + return b.finalizeWriter() +} + +var _ binding.StructuredWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go new file mode 100644 index 000000000..e7a74294d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Receiver receives messages. +type Receiver interface { + // Receive blocks till a message is received or ctx expires. + // Receive can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message + Receive(ctx context.Context) (binding.Message, error) +} + +// ReceiveCloser is a Receiver that can be closed. +type ReceiveCloser interface { + Receiver + Closer +} + +// ResponseFn is the function callback provided from Responder.Respond to allow +// for a receiver to "reply" to a message it receives. +// transformers are applied when the message is written on the wire. +type ResponseFn func(ctx context.Context, m binding.Message, r Result, transformers ...binding.Transformer) error + +// Responder receives messages and is given a callback to respond. +type Responder interface { + // Respond blocks till a message is received or ctx expires. + // Respond can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message, + // while the protocol implementation is responsible for `Finish()` the response message. + // The caller MUST invoke ResponseFn, in order to avoid leaks. + // The correct flow for the caller is to finish the received message and then invoke the ResponseFn + Respond(ctx context.Context) (binding.Message, ResponseFn, error) +} + +// ResponderCloser is a Responder that can be closed. 
+type ResponderCloser interface { + Responder + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go new file mode 100644 index 000000000..4a058c962 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -0,0 +1,23 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" +) + +// Opener is the common interface for things that need to be opened. +type Opener interface { + // OpenInbound is a blocking call and ctx is used to stop the Inbound message Receiver/Responder. + // Closing the context won't close the Receiver/Responder, aka it won't invoke Close(ctx). + OpenInbound(ctx context.Context) error +} + +// Closer is the common interface for things that can be closed. +// After invoking Close(ctx), you cannot reuse the object you closed. +type Closer interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go new file mode 100644 index 000000000..e44fa432a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -0,0 +1,49 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Sender sends messages. +type Sender interface { + // Send a message. + // + // Send returns when the "outbound" message has been sent. The Sender may + // still be expecting acknowledgment or holding other state for the message. + // + // m.Finish() is called when sending is finished (both succeeded or failed): + // expected acknowledgments (or errors) have been received, the Sender is + // no longer holding any state for the message. + // m.Finish() may be called during or after Send(). + // + // transformers are applied when the message is written on the wire. + Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error +} + +// SendCloser is a Sender that can be closed. +type SendCloser interface { + Sender + Closer +} + +// Requester sends a message and receives a response +// +// Optional interface that may be implemented by protocols that support +// request/response correlation. +type Requester interface { + // Request sends m like Sender.Send() but also arranges to receive a response. + Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) +} + +// RequesterCloser is a Requester that can be closed. +type RequesterCloser interface { + Requester + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go new file mode 100644 index 000000000..eae64e018 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -0,0 +1,127 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "errors" + "fmt" +) + +// Result leverages go's error wrapping. +type Result error + +// ResultIs reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. 
+// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// (text from errors/wrap.go) +var ResultIs = errors.Is + +// ResultAs finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +// (text from errors/wrap.go) +var ResultAs = errors.As + +func NewResult(messageFmt string, args ...interface{}) Result { + return fmt.Errorf(messageFmt, args...) +} + +// IsACK true means the recipient acknowledged the event. +func IsACK(target Result) bool { + // special case, nil target also means ACK. + if target == nil { + return true + } + + return ResultIs(target, ResultACK) +} + +// IsNACK true means the recipient did not acknowledge the event. +func IsNACK(target Result) bool { + return ResultIs(target, ResultNACK) +} + +// IsUndelivered true means the target result is not an ACK/NACK, but some other +// error unrelated to delivery not from the intended recipient. Likely target +// is an error that represents some part of the protocol is misconfigured or +// the event that was attempting to be sent was invalid. +func IsUndelivered(target Result) bool { + if target == nil { + // Short-circuit nil result is ACK. + return false + } + return !ResultIs(target, ResultACK) && !ResultIs(target, ResultNACK) +} + +var ( + ResultACK = NewReceipt(true, "") + ResultNACK = NewReceipt(false, "") +) + +// NewReceipt returns a fully populated protocol Receipt that should be used as +// a transport.Result. This type holds the base ACK/NACK results. +func NewReceipt(ack bool, messageFmt string, args ...interface{}) Result { + return &Receipt{ + Err: fmt.Errorf(messageFmt, args...), + ACK: ack, + } +} + +// Receipt wraps the fields required to understand if a protocol event is acknowledged. +type Receipt struct { + Err error + ACK bool +} + +// make sure Result implements error. +var _ error = (*Receipt)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Receipt) Is(target error) bool { + if o, ok := target.(*Receipt); ok { + if e == nil { + // Special case nil e as ACK. + return o.ACK + } + return e.ACK == o.ACK + } + // Allow for wrapped errors. + if e != nil { + return errors.Is(e.Err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *Receipt) Error() string { + if e != nil { + return e.Err.Error() + } + return "" +} + +// Unwrap returns the wrapped error if exist or nil +func (e *Receipt) Unwrap() error { + if e != nil { + return errors.Unwrap(e.Err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf new file mode 100644 index 000000000..d6f269556 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf @@ -0,0 +1,3 @@ +checks = [ + "all", "-ST1003", +] \ No newline at end of file diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go new file mode 100644 index 000000000..814626874 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -0,0 +1,41 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import "reflect" + +// Allocate allocates a new instance of type t and returns: +// asPtr is of type t if t is a pointer type and of type &t otherwise +// asValue is a Value of type t pointing to the same data as asPtr +func Allocate(obj interface{}) (asPtr interface{}, asValue reflect.Value) { + if obj == nil { + return nil, reflect.Value{} + } + + switch t := reflect.TypeOf(obj); t.Kind() { + case reflect.Ptr: + reflectPtr := reflect.New(t.Elem()) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.Map: + reflectPtr := reflect.MakeMap(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.String: + reflectPtr := reflect.New(t) + asPtr = "" + asValue = reflectPtr.Elem() + case reflect.Slice: + reflectPtr := reflect.MakeSlice(t, 0, 0) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + default: + reflectPtr := reflect.New(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr.Elem() + } + return +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go new file mode 100644 index 000000000..cf7a94f35 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -0,0 +1,46 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package types implements the CloudEvents type system. + +CloudEvents defines a set of abstract types for event context attributes. Each +type has a corresponding native Go type and a canonical string encoding. The +native Go types used to represent the CloudEvents types are: +bool, int32, string, []byte, *url.URL, time.Time + + +----------------+----------------+-----------------------------------+ + |CloudEvents Type|Native Type |Convertible From | + +================+================+===================================+ + |Bool |bool |bool | + +----------------+----------------+-----------------------------------+ + |Integer |int32 |Any numeric type with value in | + | | |range of int32 | + +----------------+----------------+-----------------------------------+ + |String |string |string | + +----------------+----------------+-----------------------------------+ + |Binary |[]byte |[]byte | + +----------------+----------------+-----------------------------------+ + |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | + +----------------+----------------+-----------------------------------+ + |URI |*url.URL |url.URL, types.URIRef, types.URI | + | | |Must be an absolute URI. 
| + +----------------+----------------+-----------------------------------+ + |Timestamp |time.Time |time.Time, types.Timestamp | + +----------------+----------------+-----------------------------------+ + +Extension attributes may be stored as a native type or a canonical string. The +To functions will convert to the desired from any convertible type +or from the canonical string form. + +The Parse and Format functions convert native types to/from +canonical strings. + +Note are no Parse or Format functions for URL or string. For URL use the +standard url.Parse() and url.URL.String(). The canonical string format of a +string is the string itself. + +*/ +package types diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go new file mode 100644 index 000000000..ff049727d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go @@ -0,0 +1,75 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "time" +) + +// Timestamp wraps time.Time to normalize the time layout to RFC3339. It is +// intended to enforce compliance with the CloudEvents spec for their +// definition of Timestamp. Custom marshal methods are implemented to ensure +// the outbound Timestamp is a string in the RFC3339 layout. +type Timestamp struct { + time.Time +} + +// ParseTimestamp attempts to parse the given time assuming RFC3339 layout +func ParseTimestamp(s string) (*Timestamp, error) { + if s == "" { + return nil, nil + } + tt, err := ParseTime(s) + return &Timestamp{Time: tt}, err +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (t *Timestamp) MarshalJSON() ([]byte, error) { + if t == nil || t.IsZero() { + return []byte(`""`), nil + } + return []byte(fmt.Sprintf("%q", t)), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (t *Timestamp) UnmarshalJSON(b []byte) error { + var timestamp string + if err := json.Unmarshal(b, ×tamp); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (t *Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if t == nil || t.IsZero() { + return e.EncodeElement(nil, start) + } + return e.EncodeElement(t.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (t *Timestamp) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var timestamp string + if err := d.DecodeElement(×tamp, &start); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// String outputs the time using RFC3339 format. +func (t Timestamp) String() string { return FormatTime(t.Time) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go new file mode 100644 index 000000000..bed608094 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go @@ -0,0 +1,86 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URI is a wrapper to url.URL. 
It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI. Custom +// marshal methods are implemented to ensure the outbound URI object +// is a flat string. +type URI struct { + url.URL +} + +// ParseURI attempts to parse the given string as a URI. +func ParseURI(u string) *URI { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URI{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URI) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URI) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URI) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URI) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +func (u URI) Validate() bool { + return u.IsAbs() +} + +// String returns the full string representation of the URI-Reference. +func (u *URI) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go new file mode 100644 index 000000000..22fa12314 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -0,0 +1,82 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URIRef is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI-Reference. Custom +// marshal methods are implemented to ensure the outbound URIRef object is +// is a flat string. +type URIRef struct { + url.URL +} + +// ParseURIRef attempts to parse the given string as a URI-Reference. +func ParseURIRef(u string) *URIRef { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URIRef{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URIRef) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URIRef) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. 
+func (u URIRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URIRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// String returns the full string representation of the URI-Reference. +func (u *URIRef) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go new file mode 100644 index 000000000..f643d0aa5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -0,0 +1,335 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" +) + +// FormatBool returns canonical string format: "true" or "false" +func FormatBool(v bool) string { return strconv.FormatBool(v) } + +// FormatInteger returns canonical string format: decimal notation. +func FormatInteger(v int32) string { return strconv.Itoa(int(v)) } + +// FormatBinary returns canonical string format: standard base64 encoding +func FormatBinary(v []byte) string { return base64.StdEncoding.EncodeToString(v) } + +// FormatTime returns canonical string format: RFC3339 with nanoseconds +func FormatTime(v time.Time) string { return v.UTC().Format(time.RFC3339Nano) } + +// ParseBool parse canonical string format: "true" or "false" +func ParseBool(v string) (bool, error) { return strconv.ParseBool(v) } + +// ParseInteger parse canonical string format: decimal notation. +func ParseInteger(v string) (int32, error) { + // Accept floating-point but truncate to int32 as per CE spec. + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + if f > math.MaxInt32 || f < math.MinInt32 { + return 0, rangeErr(v) + } + return int32(f), nil +} + +// ParseBinary parse canonical string format: standard base64 encoding +func ParseBinary(v string) ([]byte, error) { return base64.StdEncoding.DecodeString(v) } + +// ParseTime parse canonical string format: RFC3339 with nanoseconds +func ParseTime(v string) (time.Time, error) { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + err := convertErr(time.Time{}, v) + err.extra = ": not in RFC3339 format" + return time.Time{}, err + } + return t, nil +} + +// Format returns the canonical string format of v, where v can be +// any type that is convertible to a CloudEvents type. 
+func Format(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case bool: + return FormatBool(v), nil + case int32: + return FormatInteger(v), nil + case string: + return v, nil + case []byte: + return FormatBinary(v), nil + case URI: + return v.String(), nil + case URIRef: + // url.URL is often passed by pointer so allow both + return v.String(), nil + case Timestamp: + return FormatTime(v.Time), nil + default: + return "", fmt.Errorf("%T is not a CloudEvents type", v) + } +} + +// Validate v is a valid CloudEvents attribute value, convert it to one of: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +func Validate(v interface{}) (interface{}, error) { + switch v := v.(type) { + case bool, int32, string, []byte: + return v, nil // Already a CloudEvents type, no validation needed. + + case uint, uintptr, uint8, uint16, uint32, uint64: + u := reflect.ValueOf(v).Uint() + if u > math.MaxInt32 { + return nil, rangeErr(v) + } + return int32(u), nil + case int, int8, int16, int64: + i := reflect.ValueOf(v).Int() + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(i), nil + case float32, float64: + f := reflect.ValueOf(v).Float() + if f > math.MaxInt32 || f < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(f), nil + + case *url.URL: + if v == nil { + break + } + return URI{URL: *v}, nil + case url.URL: + return URI{URL: v}, nil + case *URIRef: + if v != nil { + return *v, nil + } + return nil, nil + case URIRef: + return v, nil + case *URI: + if v != nil { + return *v, nil + } + return nil, nil + case URI: + return v, nil + case time.Time: + return Timestamp{Time: v}, nil + case *time.Time: + if v == nil { + break + } + return Timestamp{Time: *v}, nil + case Timestamp: + return v, nil + } + rx := reflect.ValueOf(v) + if rx.Kind() == reflect.Ptr && !rx.IsNil() { + // Allow pointers-to convertible types + return Validate(rx.Elem().Interface()) + } + return nil, fmt.Errorf("invalid CloudEvents value: %#v", v) +} + +// Clone v clones a CloudEvents attribute value, which is one of the valid types: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// Returns the same type +// Panics if the type is not valid +func Clone(v interface{}) interface{} { + if v == nil { + return nil + } + switch v := v.(type) { + case bool, int32, string, nil: + return v // Already a CloudEvents type, no validation needed. + case []byte: + clone := make([]byte, len(v)) + copy(clone, v) + return v + case url.URL: + return URI{v} + case *url.URL: + return &URI{*v} + case URIRef: + return v + case *URIRef: + return &URIRef{v.URL} + case URI: + return v + case *URI: + return &URI{v.URL} + case time.Time: + return Timestamp{v} + case *time.Time: + return &Timestamp{*v} + case Timestamp: + return v + case *Timestamp: + return &Timestamp{v.Time} + } + panic(fmt.Errorf("invalid CloudEvents value: %#v", v)) +} + +// ToBool accepts a bool value or canonical "true"/"false" string. +func ToBool(v interface{}) (bool, error) { + v, err := Validate(v) + if err != nil { + return false, err + } + switch v := v.(type) { + case bool: + return v, nil + case string: + return ParseBool(v) + default: + return false, convertErr(true, v) + } +} + +// ToInteger accepts any numeric value in int32 range, or canonical string. 
+func ToInteger(v interface{}) (int32, error) { + v, err := Validate(v) + if err != nil { + return 0, err + } + switch v := v.(type) { + case int32: + return v, nil + case string: + return ParseInteger(v) + default: + return 0, convertErr(int32(0), v) + } +} + +// ToString returns a string value unaltered. +// +// This function does not perform canonical string encoding, use one of the +// Format functions for that. +func ToString(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case string: + return v, nil + default: + return "", convertErr("", v) + } +} + +// ToBinary returns a []byte value, decoding from base64 string if necessary. +func ToBinary(v interface{}) ([]byte, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case []byte: + return v, nil + case string: + return base64.StdEncoding.DecodeString(v) + default: + return nil, convertErr([]byte(nil), v) + } +} + +// ToURL returns a *url.URL value, parsing from string if necessary. +func ToURL(v interface{}) (*url.URL, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case *URI: + return &v.URL, nil + case URI: + return &v.URL, nil + case *URIRef: + return &v.URL, nil + case URIRef: + return &v.URL, nil + case string: + u, err := url.Parse(v) + if err != nil { + return nil, err + } + return u, nil + default: + return nil, convertErr((*url.URL)(nil), v) + } +} + +// ToTime returns a time.Time value, parsing from RFC3339 string if necessary. +func ToTime(v interface{}) (time.Time, error) { + v, err := Validate(v) + if err != nil { + return time.Time{}, err + } + switch v := v.(type) { + case Timestamp: + return v.Time, nil + case string: + ts, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return time.Time{}, err + } + return ts, nil + default: + return time.Time{}, convertErr(time.Time{}, v) + } +} + +func IsZero(v interface{}) bool { + // Fast path + if v == nil { + return true + } + if s, ok := v.(string); ok && s == "" { + return true + } + return reflect.ValueOf(v).IsZero() +} + +type ConvertErr struct { + // Value being converted + Value interface{} + // Type of attempted conversion + Type reflect.Type + + extra string +} + +func (e *ConvertErr) Error() string { + return fmt.Sprintf("cannot convert %#v to %s%s", e.Value, e.Type, e.extra) +} + +func convertErr(target, v interface{}) *ConvertErr { + return &ConvertErr{Value: v, Type: reflect.TypeOf(target)} +} + +func rangeErr(v interface{}) error { + e := convertErr(int32(0), v) + e.extra = ": out of range" + return e +} diff --git a/vendor/github.com/eclipse/paho.golang/LICENSE b/vendor/github.com/eclipse/paho.golang/LICENSE new file mode 100644 index 000000000..d3087e4c5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. 
A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. + +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. 
+ Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. + +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. 
While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. 
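
The ACK/NACK receipt semantics defined in the vendored v2/protocol/result.go above can be exercised as in the minimal sketch below. This is an editor's illustration, not part of the vendored sources; sendEvent is a hypothetical stand-in for any call that returns a protocol.Result (for example a client Send).

// Minimal sketch (not part of the vendored files): interpreting ACK/NACK results.
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/protocol"
)

// sendEvent is a hypothetical placeholder for any operation that reports
// delivery via a protocol.Result; here it pretends the recipient rejected
// the event by returning a NACK receipt.
func sendEvent() protocol.Result {
	return protocol.NewReceipt(false, "broker rejected event: %s", "quota exceeded")
}

func main() {
	res := sendEvent()
	switch {
	case protocol.IsACK(res):
		// nil results also count as ACK.
		fmt.Println("delivered and acknowledged")
	case protocol.IsNACK(res):
		fmt.Println("delivered but not acknowledged:", res)
	case protocol.IsUndelivered(res):
		// Neither ACK nor NACK: a protocol or validation failure before delivery.
		fmt.Println("not delivered:", res)
	}
}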
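
Likewise, the canonical attribute conversions implemented in the vendored v2/types package above (Format, Validate, ToInteger, FormatTime) can be used as in this sketch. It assumes the consuming module depends on github.com/cloudevents/sdk-go/v2, which this patch vendors; it is illustrative only.

// Minimal sketch (not part of the vendored files): canonical type conversions.
package main

import (
	"fmt"
	"time"

	"github.com/cloudevents/sdk-go/v2/types"
)

func main() {
	// Any numeric type whose value fits in int32 is normalized to int32 and
	// then rendered in the canonical decimal form.
	s, err := types.Format(int64(42))
	fmt.Println(s, err) // 42 <nil>

	// Values outside the int32 range are rejected during validation.
	_, err = types.Format(int64(1) << 40)
	fmt.Println(err) // cannot convert ... to int32: out of range

	// Timestamps use RFC3339 with nanoseconds as the canonical string form.
	now := time.Date(2024, 3, 26, 6, 58, 7, 0, time.UTC)
	fmt.Println(types.FormatTime(now)) // 2024-03-26T06:58:07Z

	// ToInteger accepts either an int32-compatible value or the canonical string.
	n, _ := types.ToInteger("17")
	fmt.Println(n) // 17
}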
diff --git a/vendor/github.com/eclipse/paho.golang/packets/auth.go b/vendor/github.com/eclipse/paho.golang/packets/auth.go new file mode 100644 index 000000000..56237e00c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/auth.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Auth is the Variable Header definition for a Auth control packet +type Auth struct { + Properties *Properties + ReasonCode byte +} + +// AuthSuccess is the return code for successful authentication +const ( + AuthSuccess = 0x00 + AuthContinueAuthentication = 0x18 + AuthReauthenticate = 0x19 +) + +func (a *Auth) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "AUTH: ReasonCode:%X", a.ReasonCode) + if a.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", a.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +// Unpack is the implementation of the interface required function for a packet +func (a *Auth) Unpack(r *bytes.Buffer) error { + var err error + + success := r.Len() == 0 + noProps := r.Len() == 1 + if !success { + a.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = a.Properties.Unpack(r, AUTH) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (a *Auth) Buffers() net.Buffers { + idvp := a.Properties.Pack(AUTH) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{a.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (a *Auth) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: AUTH}} + cp.Content = a + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connack.go b/vendor/github.com/eclipse/paho.golang/packets/connack.go new file mode 100644 index 000000000..3041fbcb5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/connack.go @@ -0,0 +1,145 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Connack is the Variable Header definition for a connack control packet +type Connack struct { + Properties *Properties + ReasonCode byte + SessionPresent bool +} + +const ( + ConnackSuccess = 0x00 + ConnackUnspecifiedError = 0x80 + ConnackMalformedPacket = 0x81 + ConnackProtocolError = 0x81 + ConnackImplementationSpecificError = 0x83 + ConnackUnsupportedProtocolVersion = 0x84 + ConnackInvalidClientID = 0x85 + ConnackBadUsernameOrPassword = 0x86 + ConnackNotAuthorized = 0x87 + ConnackServerUnavailable = 0x88 + ConnackServerBusy = 0x89 + ConnackBanned = 0x8A + ConnackBadAuthenticationMethod = 0x8C + ConnackTopicNameInvalid = 0x90 + ConnackPacketTooLarge = 0x95 + ConnackQuotaExceeded = 0x97 + ConnackPayloadFormatInvalid = 0x99 + ConnackRetainNotSupported = 0x9A + ConnackQoSNotSupported = 0x9B + ConnackUseAnotherServer = 0x9C + ConnackServerMoved = 0x9D + ConnackConnectionRateExceeded = 0x9F +) + +func (c *Connack) String() string { + return fmt.Sprintf("CONNACK: ReasonCode:%d SessionPresent:%t\nProperties:\n%s", c.ReasonCode, c.SessionPresent, c.Properties) +} + +//Unpack is the implementation of the interface required function for a packet +func (c *Connack) Unpack(r *bytes.Buffer) error { + connackFlags, err := r.ReadByte() + if err != nil { + return err + } + c.SessionPresent = connackFlags&0x01 > 0 + + c.ReasonCode, err = r.ReadByte() + if 
err != nil { + return err + } + + err = c.Properties.Unpack(r, CONNACK) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (c *Connack) Buffers() net.Buffers { + var header bytes.Buffer + + if c.SessionPresent { + header.WriteByte(1) + } else { + header.WriteByte(0) + } + header.WriteByte(c.ReasonCode) + + idvp := c.Properties.Pack(CONNACK) + propLen := encodeVBI(len(idvp)) + + n := net.Buffers{header.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connack) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNACK}} + cp.Content = c + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (c *Connack) Reason() string { + switch c.ReasonCode { + case 0: + return "Success - The Connection is accepted." + case 128: + return "Unspecified error - The Server does not wish to reveal the reason for the failure, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - Data within the CONNECT packet could not be correctly parsed." + case 130: + return "Protocol Error - Data in the CONNECT packet does not conform to this specification." + case 131: + return "Implementation specific error - The CONNECT is valid but is not accepted by this Server." + case 132: + return "Unsupported Protocol Version - The Server does not support the version of the MQTT protocol requested by the Client." + case 133: + return "Client Identifier not valid - The Client Identifier is a valid string but is not allowed by the Server." + case 134: + return "Bad User Name or Password - The Server does not accept the User Name or Password specified by the Client" + case 135: + return "Not authorized - The Client is not authorized to connect." + case 136: + return "Server unavailable - The MQTT Server is not available." + case 137: + return "Server busy - The Server is busy. Try again later." + case 138: + return "Banned - This Client has been banned by administrative action. Contact the server administrator." + case 140: + return "Bad authentication method - The authentication method is not supported or does not match the authentication method currently in use." + case 144: + return "Topic Name invalid - The Will Topic Name is not malformed, but is not accepted by this Server." + case 149: + return "Packet too large - The CONNECT packet exceeded the maximum permissible size." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 154: + return "Retain not supported - The Server does not support retained messages, and Will Retain was set to 1." + case 155: + return "QoS not supported - The Server does not support the QoS set in Will QoS." + case 156: + return "Use another server - The Client should temporarily use another server." + case 157: + return "Server moved - The Client should permanently use another server." + case 159: + return "Connection rate exceeded - The connection rate limit has been exceeded." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connect.go b/vendor/github.com/eclipse/paho.golang/packets/connect.go new file mode 100644 index 000000000..31340f6bd --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/connect.go @@ -0,0 +1,189 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Connect is the Variable Header definition for a connect control packet +type Connect struct { + WillMessage []byte + Password []byte + Username string + ProtocolName string + ClientID string + WillTopic string + Properties *Properties + WillProperties *Properties + KeepAlive uint16 + ProtocolVersion byte + WillQOS byte + PasswordFlag bool + UsernameFlag bool + WillRetain bool + WillFlag bool + CleanStart bool +} + +func (c *Connect) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "CONNECT: ProtocolName:%s ProtocolVersion:%d ClientID:%s KeepAlive:%d CleanStart:%t", c.ProtocolName, c.ProtocolVersion, c.ClientID, c.KeepAlive, c.CleanStart) + if c.UsernameFlag { + fmt.Fprintf(&b, " Username:%s", c.Username) + } + if c.PasswordFlag { + fmt.Fprintf(&b, " Password:%s", c.Password) + } + fmt.Fprint(&b, "\n") + if c.WillFlag { + fmt.Fprintf(&b, " WillTopic:%s WillQOS:%d WillRetain:%t WillMessage:\n%s\n", c.WillTopic, c.WillQOS, c.WillRetain, c.WillMessage) + if c.WillProperties != nil { + fmt.Fprintf(&b, "WillProperties:\n%s", c.WillProperties) + } + } + if c.Properties != nil { + fmt.Fprintf(&b, "Properties:\n%s", c.Properties) + } + + return b.String() +} + +// PackFlags takes the Connect flags and packs them into the single byte +// representation used on the wire by MQTT +func (c *Connect) PackFlags() (f byte) { + if c.UsernameFlag { + f |= 0x01 << 7 + } + if c.PasswordFlag { + f |= 0x01 << 6 + } + if c.WillFlag { + f |= 0x01 << 2 + f |= c.WillQOS << 3 + if c.WillRetain { + f |= 0x01 << 5 + } + } + if c.CleanStart { + f |= 0x01 << 1 + } + return +} + +// UnpackFlags takes the wire byte representing the connect options flags +// and fills out the appropriate variables in the struct +func (c *Connect) UnpackFlags(b byte) { + c.CleanStart = 1&(b>>1) > 0 + c.WillFlag = 1&(b>>2) > 0 + c.WillQOS = 3 & (b >> 3) + c.WillRetain = 1&(b>>5) > 0 + c.PasswordFlag = 1&(b>>6) > 0 + c.UsernameFlag = 1&(b>>7) > 0 +} + +//Unpack is the implementation of the interface required function for a packet +func (c *Connect) Unpack(r *bytes.Buffer) error { + var err error + + if c.ProtocolName, err = readString(r); err != nil { + return err + } + + if c.ProtocolVersion, err = r.ReadByte(); err != nil { + return err + } + + flags, err := r.ReadByte() + if err != nil { + return err + } + c.UnpackFlags(flags) + + if c.KeepAlive, err = readUint16(r); err != nil { + return err + } + + err = c.Properties.Unpack(r, CONNECT) + if err != nil { + return err + } + + c.ClientID, err = readString(r) + if err != nil { + return err + } + + if c.WillFlag { + c.WillProperties = &Properties{} + err = c.WillProperties.Unpack(r, CONNECT) + if err != nil { + return err + } + c.WillTopic, err = readString(r) + if err != nil { + return err + } + c.WillMessage, err = readBinary(r) + if err != nil { + return err + } + } + + if c.UsernameFlag { + c.Username, err = readString(r) + if err != nil { + return err + } + } + + if c.PasswordFlag { + c.Password, err = readBinary(r) + if err != nil { + return err + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (c *Connect) Buffers() net.Buffers { + var cp 
bytes.Buffer + + writeString(c.ProtocolName, &cp) + cp.WriteByte(c.ProtocolVersion) + cp.WriteByte(c.PackFlags()) + writeUint16(c.KeepAlive, &cp) + idvp := c.Properties.Pack(CONNECT) + encodeVBIdirect(len(idvp), &cp) + cp.Write(idvp) + + writeString(c.ClientID, &cp) + if c.WillFlag { + willIdvp := c.WillProperties.Pack(CONNECT) + encodeVBIdirect(len(willIdvp), &cp) + cp.Write(willIdvp) + writeString(c.WillTopic, &cp) + writeBinary(c.WillMessage, &cp) + } + if c.UsernameFlag { + writeString(c.Username, &cp) + } + if c.PasswordFlag { + writeBinary(c.Password, &cp) + } + + return net.Buffers{cp.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNECT}} + cp.Content = c + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go new file mode 100644 index 000000000..9180207a6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go @@ -0,0 +1,152 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Disconnect is the Variable Header definition for a Disconnect control packet +type Disconnect struct { + Properties *Properties + ReasonCode byte +} + +func (d *Disconnect) String() string { + return fmt.Sprintf("DISCONNECT: ReasonCode:%X Properties\n%s", d.ReasonCode, d.Properties) +} + +// DisconnectNormalDisconnection, etc are the list of valid disconnection reason codes. +const ( + DisconnectNormalDisconnection = 0x00 + DisconnectDisconnectWithWillMessage = 0x04 + DisconnectUnspecifiedError = 0x80 + DisconnectMalformedPacket = 0x81 + DisconnectProtocolError = 0x82 + DisconnectImplementationSpecificError = 0x83 + DisconnectNotAuthorized = 0x87 + DisconnectServerBusy = 0x89 + DisconnectServerShuttingDown = 0x8B + DisconnectKeepAliveTimeout = 0x8D + DisconnectSessionTakenOver = 0x8E + DisconnectTopicFilterInvalid = 0x8F + DisconnectTopicNameInvalid = 0x90 + DisconnectReceiveMaximumExceeded = 0x93 + DisconnectTopicAliasInvalid = 0x94 + DisconnectPacketTooLarge = 0x95 + DisconnectMessageRateTooHigh = 0x96 + DisconnectQuotaExceeded = 0x97 + DisconnectAdministrativeAction = 0x98 + DisconnectPayloadFormatInvalid = 0x99 + DisconnectRetainNotSupported = 0x9A + DisconnectQoSNotSupported = 0x9B + DisconnectUseAnotherServer = 0x9C + DisconnectServerMoved = 0x9D + DisconnectSharedSubscriptionNotSupported = 0x9E + DisconnectConnectionRateExceeded = 0x9F + DisconnectMaximumConnectTime = 0xA0 + DisconnectSubscriptionIdentifiersNotSupported = 0xA1 + DisconnectWildcardSubscriptionsNotSupported = 0xA2 +) + +// Unpack is the implementation of the interface required function for a packet +func (d *Disconnect) Unpack(r *bytes.Buffer) error { + var err error + d.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + err = d.Properties.Unpack(r, DISCONNECT) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (d *Disconnect) Buffers() net.Buffers { + idvp := d.Properties.Pack(DISCONNECT) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{d.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (d *Disconnect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: 
FixedHeader{Type: DISCONNECT}} + cp.Content = d + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (d *Disconnect) Reason() string { + switch d.ReasonCode { + case 0: + return "Normal disconnection - Close the connection normally. Do not send the Will Message." + case 4: + return "Disconnect with Will Message - The Client wishes to disconnect but requires that the Server also publishes its Will Message." + case 128: + return "Unspecified error - The Connection is closed but the sender either does not wish to reveal the reason, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - The received packet does not conform to this specification." + case 130: + return "Protocol Error - An unexpected or out of order packet was received." + case 131: + return "Implementation specific error - The packet received is valid but cannot be processed by this implementation." + case 135: + return "Not authorized - The request is not authorized." + case 137: + return "Server busy - The Server is busy and cannot continue processing requests from this Client." + case 139: + return "Server shutting down - The Server is shutting down." + case 141: + return "Keep Alive timeout - The Connection is closed because no packet has been received for 1.5 times the Keepalive time." + case 142: + return "Session taken over - Another Connection using the same ClientID has connected causing this Connection to be closed." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed, but is not accepted by this Sever." + case 144: + return "Topic Name invalid - The Topic Name is correctly formed, but is not accepted by this Client or Server." + case 147: + return "Receive Maximum exceeded - The Client or Server has received more than Receive Maximum publication for which it has not sent PUBACK or PUBCOMP." + case 148: + return "Topic Alias invalid - The Client or Server has received a PUBLISH packet containing a Topic Alias which is greater than the Maximum Topic Alias it sent in the CONNECT or CONNACK packet." + case 149: + return "Packet too large - The packet size is greater than Maximum Packet Size for this Client or Server." + case 150: + return "Message rate too high - The received data rate is too high." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 152: + return "Administrative action - The Connection is closed due to an administrative action." + case 153: + return "Payload format invalid - The payload format does not match the one specified by the Payload Format Indicator." + case 154: + return "Retain not supported - The Server has does not support retained messages." + case 155: + return "QoS not supported - The Client specified a QoS greater than the QoS specified in a Maximum QoS in the CONNACK." + case 156: + return "Use another server - The Client should temporarily change its Server." + case 157: + return "Server moved - The Server is moved and the Client should permanently change its server location." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions." + case 159: + return "Connection rate exceeded - This connection is closed because the connection rate is too high." + case 160: + return "Maximum connect time - The maximum connection time authorized for this connection has been exceeded." 
+ case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." + } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/packets.go b/vendor/github.com/eclipse/paho.golang/packets/packets.go new file mode 100644 index 000000000..496594012 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/packets.go @@ -0,0 +1,447 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "sync" +) + +// PacketType is a type alias to byte representing the different +// MQTT control packet types +// type PacketType byte + +// The following consts are the packet type number for each of the +// different control packets in MQTT +const ( + _ byte = iota + CONNECT + CONNACK + PUBLISH + PUBACK + PUBREC + PUBREL + PUBCOMP + SUBSCRIBE + SUBACK + UNSUBSCRIBE + UNSUBACK + PINGREQ + PINGRESP + DISCONNECT + AUTH +) + +type ( + // Packet is the interface defining the unique parts of a controlpacket + Packet interface { + Unpack(*bytes.Buffer) error + Buffers() net.Buffers + WriteTo(io.Writer) (int64, error) + } + + // FixedHeader is the definition of a control packet fixed header + FixedHeader struct { + remainingLength int + Type byte + Flags byte + } + + // ControlPacket is the definition of a control packet + ControlPacket struct { + Content Packet + FixedHeader + } +) + +// NewThreadSafeConn wraps net.Conn with a mutex. ControlPacket uses it in +// WriteTo method to ensure parallel writes are thread-Safe. +func NewThreadSafeConn(c net.Conn) net.Conn { + type threadSafeConn struct { + net.Conn + sync.Locker + } + + return &threadSafeConn{ + Conn: c, + Locker: &sync.Mutex{}, + } +} + +// WriteTo operates on a FixedHeader and takes the option values and produces +// the wire format byte that represents these. 
+func (f *FixedHeader) WriteTo(w io.Writer) (int64, error) { + if _, err := w.Write([]byte{byte(f.Type)<<4 | f.Flags}); err != nil { + return 0, err + } + if _, err := w.Write(encodeVBI(f.remainingLength)); err != nil { + return 0, err + } + + return 0, nil +} + +// PacketID is a helper function that returns the value of the PacketID +// field from any kind of mqtt packet in the Content element +func (c *ControlPacket) PacketID() uint16 { + switch r := c.Content.(type) { + case *Publish: + return r.PacketID + case *Puback: + return r.PacketID + case *Pubrec: + return r.PacketID + case *Pubrel: + return r.PacketID + case *Pubcomp: + return r.PacketID + case *Subscribe: + return r.PacketID + case *Suback: + return r.PacketID + case *Unsubscribe: + return r.PacketID + case *Unsuback: + return r.PacketID + default: + return 0 + } +} + +func (c *ControlPacket) PacketType() string { + return [...]string{ + "", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT", + "AUTH", + }[c.FixedHeader.Type] +} + +// NewControlPacket takes a packetType and returns a pointer to a +// ControlPacket where the VariableHeader field is a pointer to an +// instance of a VariableHeader definition for that packetType +func NewControlPacket(t byte) *ControlPacket { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: t}} + switch t { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags = 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil + } + return cp +} + +// ReadPacket reads a control packet from a io.Reader and returns a completed +// struct with the appropriate data +func ReadPacket(r io.Reader) (*ControlPacket, error) { + t := [1]byte{} + _, err := io.ReadFull(r, t[:]) + if err != nil { + return nil, err + } + // cp := NewControlPacket(PacketType(t[0] >> 4)) + // if cp == nil { + // return nil, fmt.Errorf("invalid packet type requested, %d", t[0]>>4) + // } + + pt := t[0] >> 4 + cp := &ControlPacket{FixedHeader: FixedHeader{Type: pt}} + switch pt { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags 
= 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil, fmt.Errorf("unknown packet type %d requested", pt) + } + + cp.Flags = t[0] & 0xF + if cp.Type == PUBLISH { + cp.Content.(*Publish).QoS = (cp.Flags & 0x6) >> 1 + } + vbi, err := getVBI(r) + if err != nil { + return nil, err + } + cp.remainingLength, err = decodeVBI(vbi) + if err != nil { + return nil, err + } + + var content bytes.Buffer + content.Grow(cp.remainingLength) + + n, err := io.CopyN(&content, r, int64(cp.remainingLength)) + if err != nil { + return nil, err + } + + if n != int64(cp.remainingLength) { + return nil, fmt.Errorf("failed to read packet, expected %d bytes, read %d", cp.remainingLength, n) + } + err = cp.Content.Unpack(&content) + if err != nil { + return nil, err + } + return cp, nil +} + +// WriteTo writes a packet to an io.Writer, handling packing all the parts of +// a control packet. +func (c *ControlPacket) WriteTo(w io.Writer) (int64, error) { + buffers := c.Content.Buffers() + for _, b := range buffers { + c.remainingLength += len(b) + } + + var header bytes.Buffer + if _, err := c.FixedHeader.WriteTo(&header); err != nil { + return 0, err + } + + buffers = append(net.Buffers{header.Bytes()}, buffers...) 
+ + if safe, ok := w.(sync.Locker); ok { + safe.Lock() + defer safe.Unlock() + } + return buffers.WriteTo(w) +} + +func encodeVBI(length int) []byte { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + return b[:x] + } + } +} + +func encodeVBIdirect(length int, buf *bytes.Buffer) { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + buf.Write(b[:x]) + return + } + } +} + +func getVBI(r io.Reader) (*bytes.Buffer, error) { + var ret bytes.Buffer + digit := [1]byte{} + for { + _, err := io.ReadFull(r, digit[:]) + if err != nil { + return nil, err + } + ret.WriteByte(digit[0]) + if digit[0] <= 0x7f { + return &ret, nil + } + } +} + +func decodeVBI(r *bytes.Buffer) (int, error) { + var vbi uint32 + var multiplier uint32 + for { + digit, err := r.ReadByte() + if err != nil && err != io.EOF { + return 0, err + } + vbi |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(vbi), nil +} + +func writeUint16(u uint16, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeUint32(u uint32, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 24)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 16)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeString(s string, b *bytes.Buffer) { + writeUint16(uint16(len(s)), b) + b.WriteString(s) +} + +func writeBinary(d []byte, b *bytes.Buffer) { + writeUint16(uint16(len(d)), b) + b.Write(d) +} + +func readUint16(b *bytes.Buffer) (uint16, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, err + } + return (uint16(b1) << 8) | uint16(b2), nil +} + +func readUint32(b *bytes.Buffer) (uint32, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, err + } + b3, err := b.ReadByte() + if err != nil { + return 0, err + } + b4, err := b.ReadByte() + if err != nil { + return 0, err + } + return (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4), nil +} + +func readBinary(b *bytes.Buffer) ([]byte, error) { + size, err := readUint16(b) + if err != nil { + return nil, err + } + + var s bytes.Buffer + s.Grow(int(size)) + if _, err := io.CopyN(&s, b, int64(size)); err != nil { + return nil, err + } + + return s.Bytes(), nil +} + +func readString(b *bytes.Buffer) (string, error) { + s, err := readBinary(b) + return string(s), err +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go new file mode 100644 index 000000000..85f30c2b5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingreq is the Variable Header definition for a Pingreq control packet +type Pingreq struct { +} + +func (p *Pingreq) String() string { + return fmt.Sprintf("PINGREQ") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingreq) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface 
required function for a packet +func (p *Pingreq) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingreq) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGREQ}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go new file mode 100644 index 000000000..c110fc4dc --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingresp is the Variable Header definition for a Pingresp control packet +type Pingresp struct { +} + +func (p *Pingresp) String() string { + return fmt.Sprintf("PINGRESP") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingresp) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pingresp) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingresp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGRESP}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/properties.go b/vendor/github.com/eclipse/paho.golang/packets/properties.go new file mode 100644 index 000000000..fe1f5e22e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/properties.go @@ -0,0 +1,804 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// PropPayloadFormat, etc are the list of property codes for the +// MQTT packet properties +const ( + PropPayloadFormat byte = 1 + PropMessageExpiry byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthMethod byte = 21 + PropAuthData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + PropReceiveMaximum byte = 33 + PropTopicAliasMaximum byte = 34 + PropTopicAlias byte = 35 + PropMaximumQOS byte = 36 + PropRetainAvailable byte = 37 + PropUser byte = 38 + PropMaximumPacketSize byte = 39 + PropWildcardSubAvailable byte = 40 + PropSubIDAvailable byte = 41 + PropSharedSubAvailable byte = 42 +) + +// User is a struct for the User properties, originally it was a map +// then it was pointed out that user properties are allowed to appear +// more than once +type User struct { + Key, Value string +} + +// Properties is a struct representing the all the described properties +// allowed by the MQTT protocol, determining the validity of a property +// relvative to the packettype it was received in is provided by the +// ValidateID function +type Properties struct { + // PayloadFormat indicates the format of the payload of the message + // 0 is unspecified bytes + // 1 is UTF8 encoded character data + PayloadFormat *byte + // MessageExpiry is the lifetime of the message in seconds + MessageExpiry *uint32 + // ContentType is a UTF8 string describing the content of the message + // for example it could be a MIME type + ContentType 
string + // ResponseTopic is a UTF8 string indicating the topic name to which any + // response to this message should be sent + ResponseTopic string + // CorrelationData is binary data used to associate future response + // messages with the original request message + CorrelationData []byte + // SubscriptionIdentifier is an identifier of the subscription to which + // the Publish matched + SubscriptionIdentifier *int + // SessionExpiryInterval is the time in seconds after a client disconnects + // that the server should retain the session information (subscriptions etc) + SessionExpiryInterval *uint32 + // AssignedClientID is the server assigned client identifier in the case + // that a client connected without specifying a clientID the server + // generates one and returns it in the Connack + AssignedClientID string + // ServerKeepAlive allows the server to specify in the Connack packet + // the time in seconds to be used as the keep alive value + ServerKeepAlive *uint16 + // AuthMethod is a UTF8 string containing the name of the authentication + // method to be used for extended authentication + AuthMethod string + // AuthData is binary data containing authentication data + AuthData []byte + // RequestProblemInfo is used by the Client to indicate to the server to + // include the Reason String and/or User Properties in case of failures + RequestProblemInfo *byte + // WillDelayInterval is the number of seconds the server waits after the + // point at which it would otherwise send the will message before sending + // it. The client reconnecting before that time expires causes the server + // to cancel sending the will + WillDelayInterval *uint32 + // RequestResponseInfo is used by the Client to request the Server provide + // Response Information in the Connack + RequestResponseInfo *byte + // ResponseInfo is a UTF8 encoded string that can be used as the basis for + // createing a Response Topic. The way in which the Client creates a + // Response Topic from the Response Information is not defined. A common + // use of this is to pass a globally unique portion of the topic tree which + // is reserved for this Client for at least the lifetime of its Session. This + // often cannot just be a random name as both the requesting Client and the + // responding Client need to be authorized to use it. It is normal to use this + // as the root of a topic tree for a particular Client. For the Server to + // return this information, it normally needs to be correctly configured. 
+ // Using this mechanism allows this configuration to be done once in the + // Server rather than in each Client + ResponseInfo string + // ServerReference is a UTF8 string indicating another server the client + // can use + ServerReference string + // ReasonString is a UTF8 string representing the reason associated with + // this response, intended to be human readable for diagnostic purposes + ReasonString string + // ReceiveMaximum is the maximum number of QOS1 & 2 messages allowed to be + // 'inflight' (not having received a PUBACK/PUBCOMP response for) + ReceiveMaximum *uint16 + // TopicAliasMaximum is the highest value permitted as a Topic Alias + TopicAliasMaximum *uint16 + // TopicAlias is used in place of the topic string to reduce the size of + // packets for repeated messages on a topic + TopicAlias *uint16 + // MaximumQOS is the highest QOS level permitted for a Publish + MaximumQOS *byte + // RetainAvailable indicates whether the server supports messages with the + // retain flag set + RetainAvailable *byte + // User is a slice of user provided properties (key and value) + User []User + // MaximumPacketSize allows the client or server to specify the maximum packet + // size in bytes that they support + MaximumPacketSize *uint32 + // WildcardSubAvailable indicates whether wildcard subscriptions are permitted + WildcardSubAvailable *byte + // SubIDAvailable indicates whether subscription identifiers are supported + SubIDAvailable *byte + // SharedSubAvailable indicates whether shared subscriptions are supported + SharedSubAvailable *byte +} + +func (p *Properties) String() string { + var b strings.Builder + if p.PayloadFormat != nil { + fmt.Fprintf(&b, "\tPayloadFormat:%d\n", *p.PayloadFormat) + } + if p.MessageExpiry != nil { + fmt.Fprintf(&b, "\tMessageExpiry:%d\n", *p.MessageExpiry) + } + if p.ContentType != "" { + fmt.Fprintf(&b, "\tContentType:%s\n", p.ContentType) + } + if p.ResponseTopic != "" { + fmt.Fprintf(&b, "\tResponseTopic:%s\n", p.ResponseTopic) + } + if len(p.CorrelationData) > 0 { + fmt.Fprintf(&b, "\tCorrelationData:%X\n", p.CorrelationData) + } + if p.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "\tSubscriptionIdentifier:%d\n", *p.SubscriptionIdentifier) + } + if p.SessionExpiryInterval != nil { + fmt.Fprintf(&b, "\tSessionExpiryInterval:%d\n", *p.SessionExpiryInterval) + } + if p.AssignedClientID != "" { + fmt.Fprintf(&b, "\tAssignedClientID:%s\n", p.AssignedClientID) + } + if p.ServerKeepAlive != nil { + fmt.Fprintf(&b, "\tServerKeepAlive:%d\n", *p.ServerKeepAlive) + } + if p.AuthMethod != "" { + fmt.Fprintf(&b, "\tAuthMethod:%s\n", p.AuthMethod) + } + if len(p.AuthData) > 0 { + fmt.Fprintf(&b, "\tAuthData:%X\n", p.AuthData) + } + if p.RequestProblemInfo != nil { + fmt.Fprintf(&b, "\tRequestProblemInfo:%d\n", *p.RequestProblemInfo) + } + if p.WillDelayInterval != nil { + fmt.Fprintf(&b, "\tWillDelayInterval:%d\n", *p.WillDelayInterval) + } + if p.RequestResponseInfo != nil { + fmt.Fprintf(&b, "\tRequestResponseInfo:%d\n", *p.RequestResponseInfo) + } + if p.ServerReference != "" { + fmt.Fprintf(&b, "\tServerReference:%s\n", p.ServerReference) + } + if p.ReasonString != "" { + fmt.Fprintf(&b, "\tReasonString:%s\n", p.ReasonString) + } + if p.ReceiveMaximum != nil { + fmt.Fprintf(&b, "\tReceiveMaximum:%d\n", *p.ReceiveMaximum) + } + if p.TopicAliasMaximum != nil { + fmt.Fprintf(&b, "\tTopicAliasMaximum:%d\n", *p.TopicAliasMaximum) + } + if p.TopicAlias != nil { + fmt.Fprintf(&b, "\tTopicAlias:%d\n", *p.TopicAlias) + } + if p.MaximumQOS != nil { + 
fmt.Fprintf(&b, "\tMaximumQOS:%d\n", *p.MaximumQOS) + } + if p.RetainAvailable != nil { + fmt.Fprintf(&b, "\tRetainAvailable:%d\n", *p.RetainAvailable) + } + if p.MaximumPacketSize != nil { + fmt.Fprintf(&b, "\tMaximumPacketSize:%d\n", *p.MaximumPacketSize) + } + if p.WildcardSubAvailable != nil { + fmt.Fprintf(&b, "\tWildcardSubAvailable:%d\n", *p.WildcardSubAvailable) + } + if p.SubIDAvailable != nil { + fmt.Fprintf(&b, "\tSubIDAvailable:%d\n", *p.SubIDAvailable) + } + if p.SharedSubAvailable != nil { + fmt.Fprintf(&b, "\tSharedSubAvailable:%d\n", *p.SharedSubAvailable) + } + if len(p.User) > 0 { + fmt.Fprint(&b, "\tUser Properties:\n") + for _, v := range p.User { + fmt.Fprintf(&b, "\t\t%s:%s\n", v.Key, v.Value) + } + } + + return b.String() +} + +// Pack takes all the defined properties for an Properties and produces +// a slice of bytes representing the wire format for the information +func (i *Properties) Pack(p byte) []byte { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } + + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != 
nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return b.Bytes() +} + +// PackBuf will create a bytes.Buffer of the packed properties, it +// will only pack the properties appropriate to the packet type p +// even though other properties may exist, it will silently ignore +// them +func (i *Properties) PackBuf(p byte) *bytes.Buffer { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if i.CorrelationData != nil && len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } + + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + 
b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return &b +} + +// Unpack takes a buffer of bytes and reads out the defined properties +// filling in the appropriate entries in the struct, it returns the number +// of bytes used to store the Prop data and any error in decoding them +func (i *Properties) Unpack(r *bytes.Buffer, p byte) error { + vbi, err := getVBI(r) + if err != nil { + return err + } + size, err := decodeVBI(vbi) + if err != nil { + return err + } + if size == 0 { + return nil + } + + buf := bytes.NewBuffer(r.Next(size)) + for { + PropType, err := buf.ReadByte() + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + if !ValidateID(p, PropType) { + return fmt.Errorf("invalid Prop type %d for packet %d", PropType, p) + } + switch PropType { + case PropPayloadFormat: + pf, err := buf.ReadByte() + if err != nil { + return err + } + i.PayloadFormat = &pf + case PropMessageExpiry: + pe, err := readUint32(buf) + if err != nil { + return err + } + i.MessageExpiry = &pe + case PropContentType: + ct, err := readString(buf) + if err != nil { + return err + } + i.ContentType = ct + case PropResponseTopic: + tr, err := readString(buf) + if err != nil { + return err + } + i.ResponseTopic = tr + case PropCorrelationData: + cd, err := readBinary(buf) + if err != nil { + return err + } + i.CorrelationData = cd + case PropSubscriptionIdentifier: + si, err := decodeVBI(buf) + if err != nil { + return err + } + i.SubscriptionIdentifier = &si + case PropSessionExpiryInterval: + se, err := readUint32(buf) + if err != nil { + return err + } + i.SessionExpiryInterval = &se + case PropAssignedClientID: + ac, err := readString(buf) + if err != nil { + return err + } + i.AssignedClientID = ac + case PropServerKeepAlive: + sk, err := readUint16(buf) + if err != nil { + return err + } + i.ServerKeepAlive = &sk + case PropAuthMethod: + am, err := readString(buf) + if err != nil { + return err + } + i.AuthMethod = am + case PropAuthData: + ad, err := readBinary(buf) + if err != nil { + return err + } + i.AuthData = ad + case PropRequestProblemInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestProblemInfo = &rp + case PropWillDelayInterval: + wd, err := readUint32(buf) + if err != nil { + return err + } + i.WillDelayInterval = &wd + case PropRequestResponseInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestResponseInfo = &rp + case PropResponseInfo: + ri, err := readString(buf) + if err != nil { + return err + } + i.ResponseInfo = ri + case PropServerReference: + sr, err := readString(buf) + if err != nil { + return err + } + i.ServerReference = sr + case PropReasonString: + rs, err := 
readString(buf) + if err != nil { + return err + } + i.ReasonString = rs + case PropReceiveMaximum: + rm, err := readUint16(buf) + if err != nil { + return err + } + i.ReceiveMaximum = &rm + case PropTopicAliasMaximum: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAliasMaximum = &ta + case PropTopicAlias: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAlias = &ta + case PropMaximumQOS: + mq, err := buf.ReadByte() + if err != nil { + return err + } + i.MaximumQOS = &mq + case PropRetainAvailable: + ra, err := buf.ReadByte() + if err != nil { + return err + } + i.RetainAvailable = &ra + case PropUser: + k, err := readString(buf) + if err != nil { + return err + } + v, err := readString(buf) + if err != nil { + return err + } + i.User = append(i.User, User{k, v}) + case PropMaximumPacketSize: + mp, err := readUint32(buf) + if err != nil { + return err + } + i.MaximumPacketSize = &mp + case PropWildcardSubAvailable: + ws, err := buf.ReadByte() + if err != nil { + return err + } + i.WildcardSubAvailable = &ws + case PropSubIDAvailable: + si, err := buf.ReadByte() + if err != nil { + return err + } + i.SubIDAvailable = &si + case PropSharedSubAvailable: + ss, err := buf.ReadByte() + if err != nil { + return err + } + i.SharedSubAvailable = &ss + default: + return fmt.Errorf("unknown Prop type %d", PropType) + } + } + + return nil +} + +// ValidProperties is a map of the various properties and the +// PacketTypes that property is valid for. +var ValidProperties = map[byte]map[byte]struct{}{ + PropPayloadFormat: {PUBLISH: {}}, + PropMessageExpiry: {PUBLISH: {}}, + PropContentType: {PUBLISH: {}}, + PropResponseTopic: {PUBLISH: {}}, + PropCorrelationData: {PUBLISH: {}}, + PropTopicAlias: {PUBLISH: {}}, + PropSubscriptionIdentifier: {PUBLISH: {}, SUBSCRIBE: {}}, + PropSessionExpiryInterval: {CONNECT: {}, CONNACK: {}, DISCONNECT: {}}, + PropAssignedClientID: {CONNACK: {}}, + PropServerKeepAlive: {CONNACK: {}}, + PropWildcardSubAvailable: {CONNACK: {}}, + PropSubIDAvailable: {CONNACK: {}}, + PropSharedSubAvailable: {CONNACK: {}}, + PropRetainAvailable: {CONNACK: {}}, + PropResponseInfo: {CONNACK: {}}, + PropAuthMethod: {CONNECT: {}, CONNACK: {}, AUTH: {}}, + PropAuthData: {CONNECT: {}, CONNACK: {}, AUTH: {}}, + PropRequestProblemInfo: {CONNECT: {}}, + PropWillDelayInterval: {CONNECT: {}}, + PropRequestResponseInfo: {CONNECT: {}}, + PropServerReference: {CONNACK: {}, DISCONNECT: {}}, + PropReasonString: {CONNACK: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}}, + PropReceiveMaximum: {CONNECT: {}, CONNACK: {}}, + PropTopicAliasMaximum: {CONNECT: {}, CONNACK: {}}, + PropMaximumQOS: {CONNECT: {}, CONNACK: {}}, + PropMaximumPacketSize: {CONNECT: {}, CONNACK: {}}, + PropUser: {CONNECT: {}, CONNACK: {}, PUBLISH: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBSCRIBE: {}, UNSUBSCRIBE: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}}, +} + +// ValidateID takes a PacketType and a property name and returns +// a boolean indicating if that property is valid for that +// PacketType +func ValidateID(p byte, i byte) bool { + _, ok := ValidProperties[i][p] + return ok +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/puback.go b/vendor/github.com/eclipse/paho.golang/packets/puback.go new file mode 100644 index 000000000..67f404ce6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/puback.go @@ -0,0 +1,115 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" 
+ "strings" +) + +// Puback is the Variable Header definition for a Puback control packet +type Puback struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubackSuccess, etc are the list of valid puback reason codes. +const ( + PubackSuccess = 0x00 + PubackNoMatchingSubscribers = 0x10 + PubackUnspecifiedError = 0x80 + PubackImplementationSpecificError = 0x83 + PubackNotAuthorized = 0x87 + PubackTopicNameInvalid = 0x90 + PubackPacketIdentifierInUse = 0x91 + PubackQuotaExceeded = 0x97 + PubackPayloadFormatInvalid = 0x99 +) + +func (p *Puback) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBACK: PacketID:%d ReasonCode:%X", p.PacketID, p.ReasonCode) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Puback) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Puback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + idvp := p.Properties.Pack(PUBACK) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{b.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Puback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBACK}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Puback) Reason() string { + switch p.ReasonCode { + case 0: + return "The message is accepted. Publication of the QoS 1 message proceeds." + case 16: + return "The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)." + case 128: + return "The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "The PUBLISH is not authorized." + case 144: + return "The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "An implementation or administrative imposed limit has been exceeded." + case 153: + return "The payload format does not match the specified Payload Format Indicator." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go new file mode 100644 index 000000000..1cdfe61e9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go @@ -0,0 +1,95 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubcomp is the Variable Header definition for a Pubcomp control packet +type Pubcomp struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubcompSuccess, etc are the list of valid pubcomp reason codes. +const ( + PubcompSuccess = 0x00 + PubcompPacketIdentifierNotFound = 0x92 +) + +func (p *Pubcomp) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBCOMP: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubcomp) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubcomp) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBCOMP) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubcomp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBCOMP}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubcomp) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - Packet Identifier released. Publication of QoS 2 message is complete." + case 146: + return "Packet Identifier not found - The Packet Identifier is not known. This is not an error during recovery, but at other times indicates a mismatch between the Session State on the Client and Server." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/publish.go b/vendor/github.com/eclipse/paho.golang/packets/publish.go new file mode 100644 index 000000000..ef834b7b9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/publish.go @@ -0,0 +1,80 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" +) + +// Publish is the Variable Header definition for a publish control packet +type Publish struct { + Payload []byte + Topic string + Properties *Properties + PacketID uint16 + QoS byte + Duplicate bool + Retain bool +} + +func (p *Publish) String() string { + return fmt.Sprintf("PUBLISH: PacketID:%d QOS:%d Topic:%s Duplicate:%t Retain:%t Payload:\n%s\nProperties\n%s", p.PacketID, p.QoS, p.Topic, p.Duplicate, p.Retain, string(p.Payload), p.Properties) +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Publish) Unpack(r *bytes.Buffer) error { + var err error + p.Topic, err = readString(r) + if err != nil { + return err + } + if p.QoS > 0 { + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + } + + err = p.Properties.Unpack(r, PUBLISH) + if err != nil { + return err + } + + p.Payload, err = ioutil.ReadAll(r) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Publish) Buffers() net.Buffers { + var b bytes.Buffer + writeString(p.Topic, &b) + if p.QoS > 0 { + _ = writeUint16(p.PacketID, &b) + } + idvp := p.Properties.Pack(PUBLISH) + encodeVBIdirect(len(idvp), &b) + return net.Buffers{b.Bytes(), idvp, p.Payload} + +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Publish) WriteTo(w io.Writer) (int64, error) { + f := p.QoS << 1 + if p.Duplicate { + f |= 1 << 3 + } + if p.Retain { + f |= 1 + } + + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBLISH, Flags: f}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go new file mode 100644 index 000000000..c3820191a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go @@ -0,0 +1,117 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrec is the Variable Header definition for a Pubrec control packet +type Pubrec struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubrecSuccess, etc are the list of valid Pubrec reason codes +const ( + PubrecSuccess = 0x00 + PubrecNoMatchingSubscribers = 0x10 + PubrecUnspecifiedError = 0x80 + PubrecImplementationSpecificError = 0x83 + PubrecNotAuthorized = 0x87 + PubrecTopicNameInvalid = 0x90 + PubrecPacketIdentifierInUse = 0x91 + PubrecQuotaExceeded = 0x97 + PubrecPayloadFormatInvalid = 0x99 +) + +func (p *Pubrec) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREC: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrec) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err 
= p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrec) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREC) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrec) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREC}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubrec) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - The message is accepted. Publication of the QoS 2 message proceeds." + case 16: + return "No matching subscribers. - The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that case there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)" + case 128: + return "Unspecified error - The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "Implementation specific error - The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "Not authorized - The PUBLISH is not authorized." + case 144: + return "Topic Name invalid - The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "Packet Identifier in use - The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 153: + return "Payload format invalid - The payload format does not match the one specified in the Payload Format Indicator." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go new file mode 100644 index 000000000..27c48c240 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrel is the Variable Header definition for a Pubrel control packet +type Pubrel struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +func (p *Pubrel) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREL: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrel) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrel) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREL) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrel) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREL, Flags: 2}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/suback.go b/vendor/github.com/eclipse/paho.golang/packets/suback.go new file mode 100644 index 000000000..2503aaf1a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/suback.go @@ -0,0 +1,103 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Suback is the Variable Header definition for a Suback control packet +type Suback struct { + Properties *Properties + Reasons []byte + PacketID uint16 +} + +func (s *Suback) String() string { + return fmt.Sprintf("SUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", s.Reasons, s.PacketID, s.Properties) +} + +// SubackGrantedQoS0, etc are the list of valid suback reason codes. 
+const ( + SubackGrantedQoS0 = 0x00 + SubackGrantedQoS1 = 0x01 + SubackGrantedQoS2 = 0x02 + SubackUnspecifiederror = 0x80 + SubackImplementationspecificerror = 0x83 + SubackNotauthorized = 0x87 + SubackTopicFilterinvalid = 0x8F + SubackPacketIdentifierinuse = 0x91 + SubackQuotaexceeded = 0x97 + SubackSharedSubscriptionnotsupported = 0x9E + SubackSubscriptionIdentifiersnotsupported = 0xA1 + SubackWildcardsubscriptionsnotsupported = 0xA2 +) + +//Unpack is the implementation of the interface required function for a packet +func (s *Suback) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = s.Properties.Unpack(r, SUBACK) + if err != nil { + return err + } + + s.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Suback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + idvp := s.Properties.Pack(SUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, s.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Suback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBACK}} + cp.Content = s + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (s *Suback) Reason(index int) string { + if index >= 0 && index < len(s.Reasons) { + switch s.Reasons[index] { + case 0: + return "Granted QoS 0 - The subscription is accepted and the maximum QoS sent will be QoS 0. This might be a lower QoS than was requested." + case 1: + return "Granted QoS 1 - The subscription is accepted and the maximum QoS sent will be QoS 1. This might be a lower QoS than was requested." + case 2: + return "Granted QoS 2 - The subscription is accepted and any received QoS will be sent to this subscription." + case 128: + return "Unspecified error - The subscription is not accepted and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 131: + return "Implementation specific error - The SUBSCRIBE is valid but the Server does not accept it." + case 135: + return "Not authorized - The Client is not authorized to make this subscription." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 145: + return "Packet Identifier in use - The specified Packet Identifier is already in use." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions for this Client." + case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." 
+ } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go new file mode 100644 index 000000000..3f457a28a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go @@ -0,0 +1,116 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Subscribe is the Variable Header definition for a Subscribe control packet +type Subscribe struct { + Properties *Properties + Subscriptions map[string]SubOptions + PacketID uint16 +} + +func (s *Subscribe) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "SUBSCRIBE: PacketID:%d Subscriptions:\n", s.PacketID) + for sub, o := range s.Subscriptions { + fmt.Fprintf(&b, "\t%s: QOS:%d RetainHandling:%X NoLocal:%t RetainAsPublished:%t\n", sub, o.QoS, o.RetainHandling, o.NoLocal, o.RetainAsPublished) + } + fmt.Fprintf(&b, "Properties:\n%s", s.Properties) + + return b.String() +} + +// SubOptions is the struct representing the options for a subscription +type SubOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool +} + +// Pack is the implementation of the interface required function for a packet +func (s *SubOptions) Pack() byte { + var ret byte + ret |= s.QoS & 0x03 + if s.NoLocal { + ret |= 1 << 2 + } + if s.RetainAsPublished { + ret |= 1 << 3 + } + ret |= s.RetainHandling & 0x30 + + return ret +} + +// Unpack is the implementation of the interface required function for a packet +func (s *SubOptions) Unpack(r *bytes.Buffer) error { + b, err := r.ReadByte() + if err != nil { + return err + } + + s.QoS = b & 0x03 + s.NoLocal = (b & 1 << 2) == 1 + s.RetainAsPublished = (b & 1 << 3) == 1 + s.RetainHandling = b & 0x30 + + return nil +} + +// Unpack is the implementation of the interface required function for a packet +func (s *Subscribe) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = s.Properties.Unpack(r, SUBSCRIBE) + if err != nil { + return err + } + + for r.Len() > 0 { + var so SubOptions + t, err := readString(r) + if err != nil { + return err + } + if err = so.Unpack(r); err != nil { + return err + } + s.Subscriptions[t] = so + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Subscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + var subs bytes.Buffer + for t, o := range s.Subscriptions { + writeString(t, &subs) + subs.WriteByte(o.Pack()) + } + idvp := s.Properties.Pack(SUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, subs.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Subscribe) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBSCRIBE, Flags: 2}} + cp.Content = s + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go new file mode 100644 index 000000000..ba5164b9f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go @@ -0,0 +1,88 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsuback is the Variable Header definition for a Unsuback control packet +type Unsuback struct { + Reasons []byte + Properties *Properties + PacketID uint16 +} + +func (u *Unsuback) String() string { + 
return fmt.Sprintf("UNSUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", u.Reasons, u.PacketID, u.Properties) +} + +// UnsubackSuccess, etc are the list of valid unsuback reason codes. +const ( + UnsubackSuccess = 0x00 + UnsubackNoSubscriptionFound = 0x11 + UnsubackUnspecifiedError = 0x80 + UnsubackImplementationSpecificError = 0x83 + UnsubackNotAuthorized = 0x87 + UnsubackTopicFilterInvalid = 0x8F + UnsubackPacketIdentifierInUse = 0x91 +) + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsuback) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBACK) + if err != nil { + return err + } + + u.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsuback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + idvp := u.Properties.Pack(UNSUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, u.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsuback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: UNSUBACK}} + cp.Content = u + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (u *Unsuback) Reason(index int) string { + if index >= 0 && index < len(u.Reasons) { + switch u.Reasons[index] { + case 0x00: + return "Success - The subscription is deleted" + case 0x11: + return "No subscription found - No matching Topic Filter is being used by the Client." + case 0x80: + return "Unspecified error - The unsubscribe could not be completed and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 0x83: + return "Implementation specific error - The UNSUBSCRIBE is valid but the Server does not accept it." + case 0x87: + return "Not authorized - The Client is not authorized to unsubscribe." + case 0x8F: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 0x91: + return "Packet Identifier in use - The specified Packet Identifier is already in use." 
+ } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go new file mode 100644 index 000000000..dc4e2f89e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go @@ -0,0 +1,67 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsubscribe is the Variable Header definition for a Unsubscribe control packet +type Unsubscribe struct { + Topics []string + Properties *Properties + PacketID uint16 +} + +func (u *Unsubscribe) String() string { + return fmt.Sprintf("UNSUBSCRIBE: PacketID:%d Topics:%v Properties:\n%s", u.PacketID, u.Topics, u.Properties) +} + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsubscribe) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBSCRIBE) + if err != nil { + return err + } + + for { + t, err := readString(r) + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + u.Topics = append(u.Topics, t) + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsubscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + var topics bytes.Buffer + for _, t := range u.Topics { + writeString(t, &topics) + } + idvp := u.Properties.Pack(UNSUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, topics.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsubscribe) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: UNSUBSCRIBE, Flags: 2}} + cp.Content = u + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go new file mode 100644 index 000000000..47f11cb67 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go @@ -0,0 +1,79 @@ +package paho + +import ( + "errors" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +var ( + ErrPacketNotFound = errors.New("packet not found") +) + +type acksTracker struct { + mx sync.Mutex + order []packet +} + +func (t *acksTracker) add(pb *packets.Publish) { + t.mx.Lock() + defer t.mx.Unlock() + + for _, v := range t.order { + if v.pb.PacketID == pb.PacketID { + return // already added + } + } + + t.order = append(t.order, packet{pb: pb}) +} + +func (t *acksTracker) markAsAcked(pb *packets.Publish) error { + t.mx.Lock() + defer t.mx.Unlock() + + for k, v := range t.order { + if pb.PacketID == v.pb.PacketID { + t.order[k].acknowledged = true + return nil + } + } + + return ErrPacketNotFound +} + +func (t *acksTracker) flush(do func([]*packets.Publish)) { + t.mx.Lock() + defer t.mx.Unlock() + + var ( + buf []*packets.Publish + ) + for _, v := range t.order { + if v.acknowledged { + buf = append(buf, v.pb) + } else { + break + } + } + + if len(buf) == 0 { + return + } + + do(buf) + t.order = t.order[len(buf):] +} + +// reset should be used upon disconnections +func (t *acksTracker) reset() { + t.mx.Lock() + defer t.mx.Unlock() + t.order = nil +} + +type packet struct { + pb *packets.Publish + acknowledged bool +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/auth.go b/vendor/github.com/eclipse/paho.golang/paho/auth.go new file mode 100644 index 
000000000..7d3a3c972 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/auth.go @@ -0,0 +1,8 @@ +package paho + +// Auther is the interface for something that implements the extended authentication +// flows in MQTT v5 +type Auther interface { + Authenticate(*Auth) *Auth + Authenticated() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/client.go b/vendor/github.com/eclipse/paho.golang/paho/client.go new file mode 100644 index 000000000..f41e3d068 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/client.go @@ -0,0 +1,923 @@ +package paho + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "strings" + "sync" + "time" + + "github.com/eclipse/paho.golang/packets" + "golang.org/x/sync/semaphore" +) + +type MQTTVersion byte + +const ( + MQTTv311 MQTTVersion = 4 + MQTTv5 MQTTVersion = 5 +) + +const defaultSendAckInterval = 50 * time.Millisecond + +var ( + ErrManualAcknowledgmentDisabled = errors.New("manual acknowledgments disabled") +) + +type ( + // ClientConfig are the user configurable options for the client, an + // instance of this struct is passed into NewClient(), not all options + // are required to be set, defaults are provided for Persistence, MIDs, + // PingHandler, PacketTimeout and Router. + ClientConfig struct { + ClientID string + // Conn is the connection to broker. + // BEWARE that most wrapped net.Conn implementations like tls.Conn are + // not thread safe for writing. To fix, use packets.NewThreadSafeConn + // wrapper or extend the custom net.Conn struct with sync.Locker. + Conn net.Conn + MIDs MIDService + AuthHandler Auther + PingHandler Pinger + Router Router + Persistence Persistence + PacketTimeout time.Duration + // OnServerDisconnect is called only when a packets.DISCONNECT is received from server + OnServerDisconnect func(*Disconnect) + // OnClientError is for example called on net.Error + OnClientError func(error) + // PublishHook allows a user provided function to be called before + // a Publish packet is sent allowing it to inspect or modify the + // Publish, an example of the utility of this is provided in the + // Topic Alias Handler extension which will automatically assign + // and use topic alias values rather than topic strings. + PublishHook func(*Publish) + // EnableManualAcknowledgment is used to control the acknowledgment of packets manually. + // BEWARE that the MQTT specs require clients to send acknowledgments in the order in which the corresponding + // PUBLISH packets were received. + // Consider the following scenario: the client receives packets 1,2,3,4 + // If you acknowledge 3 first, no ack is actually sent to the server but it's buffered until also 1 and 2 + // are acknowledged. + EnableManualAcknowledgment bool + // SendAcksInterval is used only when EnableManualAcknowledgment is true + // it determines how often the client tries to send a batch of acknowledgments in the right order to the server. + SendAcksInterval time.Duration + } + // Client is the struct representing an MQTT client + Client struct { + mu sync.Mutex + ClientConfig + // raCtx is used for handling the MQTTv5 authentication exchange. 
+ raCtx *CPContext + stop chan struct{} + publishPackets chan *packets.Publish + acksTracker acksTracker + workers sync.WaitGroup + serverProps CommsProperties + clientProps CommsProperties + serverInflight *semaphore.Weighted + clientInflight *semaphore.Weighted + debug Logger + errors Logger + } + + // CommsProperties is a struct of the communication properties that may + // be set by the server in the Connack and that the client needs to be + // aware of for future subscribes/publishes + CommsProperties struct { + MaximumPacketSize uint32 + ReceiveMaximum uint16 + TopicAliasMaximum uint16 + MaximumQoS byte + RetainAvailable bool + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + } + + caContext struct { + Context context.Context + Return chan *packets.Connack + } +) + +// NewClient is used to create a new default instance of an MQTT client. +// It returns a pointer to the new client instance. +// The default client uses the provided PingHandler, MessageID and +// StandardRouter implementations, and a noop Persistence. +// These should be replaced if desired before the client is connected. +// client.Conn *MUST* be set to an already connected net.Conn before +// Connect() is called. +func NewClient(conf ClientConfig) *Client { + c := &Client{ + serverProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + RetainAvailable: true, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + }, + clientProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + }, + ClientConfig: conf, + errors: NOOPLogger{}, + debug: NOOPLogger{}, + } + + if c.Persistence == nil { + c.Persistence = &noopPersistence{} + } + if c.MIDs == nil { + c.MIDs = &MIDs{index: make([]*CPContext, int(midMax))} + } + if c.PacketTimeout == 0 { + c.PacketTimeout = 10 * time.Second + } + if c.Router == nil { + c.Router = NewStandardRouter() + } + if c.PingHandler == nil { + c.PingHandler = DefaultPingerWithCustomFailHandler(func(e error) { + go c.error(e) + }) + } + if c.OnClientError == nil { + c.OnClientError = func(e error) {} + } + + return c +} + +// Connect is used to connect the client to a server. It presumes that +// the Client instance already has a working network connection. +// The function takes a pre-prepared Connect packet, and uses that to +// establish an MQTT connection. Assuming the connection completes +// successfully the rest of the client is initiated and the Connack +// returned. Otherwise the failure Connack (if there is one) is returned +// along with an error indicating the reason for the failure to connect. 
+func (c *Client) Connect(ctx context.Context, cp *Connect) (*Connack, error) { + if c.Conn == nil { + return nil, fmt.Errorf("client connection is nil") + } + + cleanup := func() { + close(c.stop) + close(c.publishPackets) + _ = c.Conn.Close() + c.mu.Unlock() + } + + c.mu.Lock() + c.stop = make(chan struct{}) + + var publishPacketsSize uint16 = math.MaxUint16 + if cp.Properties != nil && cp.Properties.ReceiveMaximum != nil { + publishPacketsSize = *cp.Properties.ReceiveMaximum + } + c.publishPackets = make(chan *packets.Publish, publishPacketsSize) + + keepalive := cp.KeepAlive + c.ClientID = cp.ClientID + if cp.Properties != nil { + if cp.Properties.MaximumPacketSize != nil { + c.clientProps.MaximumPacketSize = *cp.Properties.MaximumPacketSize + } + if cp.Properties.MaximumQOS != nil { + c.clientProps.MaximumQoS = *cp.Properties.MaximumQOS + } + if cp.Properties.ReceiveMaximum != nil { + c.clientProps.ReceiveMaximum = *cp.Properties.ReceiveMaximum + } + if cp.Properties.TopicAliasMaximum != nil { + c.clientProps.TopicAliasMaximum = *cp.Properties.TopicAliasMaximum + } + } + + c.debug.Println("connecting") + connCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + + ccp := cp.Packet() + ccp.ProtocolName = "MQTT" + ccp.ProtocolVersion = 5 + + c.debug.Println("sending CONNECT") + if _, err := ccp.WriteTo(c.Conn); err != nil { + cleanup() + return nil, err + } + + c.debug.Println("waiting for CONNACK/AUTH") + var ( + caPacket *packets.Connack + caPacketCh = make(chan *packets.Connack) + caPacketErr = make(chan error) + ) + go c.expectConnack(caPacketCh, caPacketErr) + select { + case <-connCtx.Done(): + if ctxErr := connCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + } + cleanup() + return nil, connCtx.Err() + case err := <-caPacketErr: + c.debug.Println(err) + cleanup() + return nil, err + case caPacket = <-caPacketCh: + } + + ca := ConnackFromPacketConnack(caPacket) + + if ca.ReasonCode >= 0x80 { + var reason string + c.debug.Println("received an error code in Connack:", ca.ReasonCode) + if ca.Properties != nil { + reason = ca.Properties.ReasonString + } + cleanup() + return ca, fmt.Errorf("failed to connect to server: %s", reason) + } + + // no more possible calls to cleanup(), defer an unlock + defer c.mu.Unlock() + + if ca.Properties != nil { + if ca.Properties.ServerKeepAlive != nil { + keepalive = *ca.Properties.ServerKeepAlive + } + if ca.Properties.AssignedClientID != "" { + c.ClientID = ca.Properties.AssignedClientID + } + if ca.Properties.ReceiveMaximum != nil { + c.serverProps.ReceiveMaximum = *ca.Properties.ReceiveMaximum + } + if ca.Properties.MaximumQoS != nil { + c.serverProps.MaximumQoS = *ca.Properties.MaximumQoS + } + if ca.Properties.MaximumPacketSize != nil { + c.serverProps.MaximumPacketSize = *ca.Properties.MaximumPacketSize + } + if ca.Properties.TopicAliasMaximum != nil { + c.serverProps.TopicAliasMaximum = *ca.Properties.TopicAliasMaximum + } + c.serverProps.RetainAvailable = ca.Properties.RetainAvailable + c.serverProps.WildcardSubAvailable = ca.Properties.WildcardSubAvailable + c.serverProps.SubIDAvailable = ca.Properties.SubIDAvailable + c.serverProps.SharedSubAvailable = ca.Properties.SharedSubAvailable + } + + c.serverInflight = semaphore.NewWeighted(int64(c.serverProps.ReceiveMaximum)) + c.clientInflight = semaphore.NewWeighted(int64(c.clientProps.ReceiveMaximum)) + + c.debug.Println("received CONNACK, starting PingHandler") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer 
c.debug.Println("returning from ping handler worker") + c.PingHandler.Start(c.Conn, time.Duration(keepalive)*time.Second) + }() + + c.debug.Println("starting publish packets loop") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from publish packets loop worker") + c.routePublishPackets() + }() + + c.debug.Println("starting incoming") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from incoming worker") + c.incoming() + }() + + if c.EnableManualAcknowledgment { + c.debug.Println("starting acking routine") + + c.acksTracker.reset() + sendAcksInterval := defaultSendAckInterval + if c.SendAcksInterval > 0 { + sendAcksInterval = c.SendAcksInterval + } + + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from ack tracker routine") + t := time.NewTicker(sendAcksInterval) + for { + select { + case <-c.stop: + return + case <-t.C: + c.acksTracker.flush(func(pbs []*packets.Publish) { + for _, pb := range pbs { + c.ack(pb) + } + }) + } + } + }() + } + + return ca, nil +} + +func (c *Client) Ack(pb *Publish) error { + if !c.EnableManualAcknowledgment { + return ErrManualAcknowledgmentDisabled + } + if pb.QoS == 0 { + return nil + } + return c.acksTracker.markAsAcked(pb.Packet()) +} + +func (c *Client) ack(pb *packets.Publish) { + switch pb.QoS { + case 1: + pa := packets.Puback{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Println("sending PUBACK") + _, err := pa.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBACK for %d: %s", pb.PacketID, err) + } + case 2: + pr := packets.Pubrec{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Printf("sending PUBREC") + _, err := pr.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREC for %d: %s", pb.PacketID, err) + } + } +} + +func (c *Client) routePublishPackets() { + for { + select { + case <-c.stop: + return + case pb, open := <-c.publishPackets: + if !open { + return + } + + if !c.ClientConfig.EnableManualAcknowledgment { + c.Router.Route(pb) + c.ack(pb) + continue + } + + if pb.QoS != 0 { + c.acksTracker.add(pb) + } + + c.Router.Route(pb) + } + } +} + +// incoming is the Client function that reads and handles incoming +// packets from the server. 
The function is started as a goroutine +// from Connect(), it exits when it receives a server initiated +// Disconnect, the Stop channel is closed or there is an error reading +// a packet from the network connection +func (c *Client) incoming() { + defer c.debug.Println("client stopping, incoming stopping") + for { + select { + case <-c.stop: + return + default: + recv, err := packets.ReadPacket(c.Conn) + if err != nil { + go c.error(err) + return + } + switch recv.Type { + case packets.CONNACK: + c.debug.Println("received CONNACK") + go c.error(fmt.Errorf("received unexpected CONNACK")) + return + case packets.AUTH: + c.debug.Println("received AUTH") + ap := recv.Content.(*packets.Auth) + switch ap.ReasonCode { + case 0x0: + if c.AuthHandler != nil { + go c.AuthHandler.Authenticated() + } + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + case 0x18: + if c.AuthHandler != nil { + if _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(ap)).Packet().WriteTo(c.Conn); err != nil { + go c.error(err) + return + } + } + } + case packets.PUBLISH: + pb := recv.Content.(*packets.Publish) + c.debug.Printf("received QoS%d PUBLISH", pb.QoS) + c.mu.Lock() + select { + case <-c.stop: + c.mu.Unlock() + return + default: + c.publishPackets <- pb + c.mu.Unlock() + } + case packets.PUBACK, packets.PUBCOMP, packets.SUBACK, packets.UNSUBACK: + c.debug.Printf("received %s packet with id %d", recv.PacketType(), recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx != nil { + cpCtx.Return <- *recv + } else { + c.debug.Println("received a response for a message ID we don't know:", recv.PacketID()) + } + case packets.PUBREC: + c.debug.Println("received PUBREC for", recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx == nil { + c.debug.Println("received a PUBREC for a message ID we don't know:", recv.PacketID()) + pl := packets.Pubrel{ + PacketID: recv.Content.(*packets.Pubrec).PacketID, + ReasonCode: 0x92, + } + c.debug.Println("sending PUBREL for", pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } else { + pr := recv.Content.(*packets.Pubrec) + if pr.ReasonCode >= 0x80 { + //Received a failure code, shortcut and return + cpCtx.Return <- *recv + } else { + pl := packets.Pubrel{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBREL for", pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } + } + case packets.PUBREL: + c.debug.Println("received PUBREL for", recv.PacketID()) + //Auto respond to pubrels unless failure code + pr := recv.Content.(*packets.Pubrel) + if pr.ReasonCode >= 0x80 { + //Received a failure code, continue + continue + } else { + pc := packets.Pubcomp{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBCOMP for", pr.PacketID) + _, err := pc.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBCOMP for %d: %s", pc.PacketID, err) + } + } + case packets.DISCONNECT: + c.debug.Println("received DISCONNECT") + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + go func() { + if c.OnServerDisconnect != nil { + go c.serverDisconnect(DisconnectFromPacketDisconnect(recv.Content.(*packets.Disconnect))) + } else { + go c.error(fmt.Errorf("server initiated disconnect")) + } + }() + return + case packets.PINGRESP: + c.debug.Println("received PINGRESP") + c.PingHandler.PingResp() + } + } + } +} + +func (c *Client) close() { + c.mu.Lock() + defer c.mu.Unlock() + + 
select { + case <-c.stop: + //already shutting down, do nothing + return + default: + } + + close(c.stop) + close(c.publishPackets) + + c.debug.Println("client stopped") + c.PingHandler.Stop() + c.debug.Println("ping stopped") + _ = c.Conn.Close() + c.debug.Println("conn closed") + c.acksTracker.reset() + c.debug.Println("acks tracker reset") +} + +// error is called to signify that an error situation has occurred, this +// causes the client's Stop channel to be closed (if it hasn't already been) +// which results in the other client goroutines terminating. +// It also closes the client network connection. +func (c *Client) error(e error) { + c.debug.Println("error called:", e) + c.close() + c.workers.Wait() + go c.OnClientError(e) +} + +func (c *Client) serverDisconnect(d *Disconnect) { + c.close() + c.workers.Wait() + c.debug.Println("calling OnServerDisconnect") + go c.OnServerDisconnect(d) +} + +// Authenticate is used to initiate a reauthentication of credentials with the +// server. This function sends the initial Auth packet to start the reauthentication +// then relies on the client AuthHandler managing any further requests from the +// server until either a successful Auth packet is passed back, or a Disconnect +// is received. +func (c *Client) Authenticate(ctx context.Context, a *Auth) (*AuthResponse, error) { + c.debug.Println("client initiated reauthentication") + + c.mu.Lock() + if c.raCtx != nil { + c.mu.Unlock() + return nil, fmt.Errorf("previous authentication is still in progress") + } + c.raCtx = &CPContext{ctx, make(chan packets.ControlPacket, 1)} + c.mu.Unlock() + defer func() { + c.mu.Lock() + c.raCtx = nil + c.mu.Unlock() + }() + + c.debug.Println("sending AUTH") + if _, err := a.Packet().WriteTo(c.Conn); err != nil { + return nil, err + } + + var rp packets.ControlPacket + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case rp = <-c.raCtx.Return: + } + + switch rp.Type { + case packets.AUTH: + //If we've received one here it must be successful, the only way + //to abort a reauth is a server initiated disconnect + return AuthResponseFromPacketAuth(rp.Content.(*packets.Auth)), nil + case packets.DISCONNECT: + return AuthResponseFromPacketDisconnect(rp.Content.(*packets.Disconnect)), nil + } + + return nil, fmt.Errorf("error with Auth, didn't receive Auth or Disconnect") +} + +// Subscribe is used to send a Subscription request to the MQTT server. +// It is passed a pre-prepared Subscribe packet and blocks waiting for +// a response Suback, or for the timeout to fire. Any response Suback +// is returned from the function, along with any errors. 
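Continuing the connection sketch above (the topic filter is an assumption), a subscription supplies a map of topic filters to SubscribeOptions, and the returned Suback carries one reason code per requested subscription:

	sa, err := c.Subscribe(context.Background(), &paho.Subscribe{
		Subscriptions: map[string]paho.SubscribeOptions{
			"sensors/+/temperature": {QoS: 1}, // hypothetical topic filter
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("suback reasons:", sa.Reasons)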
+func (c *Client) Subscribe(ctx context.Context, s *Subscribe) (*Suback, error) {
+	if !c.serverProps.WildcardSubAvailable {
+		for t := range s.Subscriptions {
+			if strings.ContainsAny(t, "#+") {
+				// Using a wildcard in a subscription when not supported
+				return nil, fmt.Errorf("cannot subscribe to %s, server does not support wildcards", t)
+			}
+		}
+	}
+	if !c.serverProps.SubIDAvailable && s.Properties != nil && s.Properties.SubscriptionIdentifier != nil {
+		return nil, fmt.Errorf("cannot send subscribe with subID set, server does not support subID")
+	}
+	if !c.serverProps.SharedSubAvailable {
+		for t := range s.Subscriptions {
+			if strings.HasPrefix(t, "$share") {
+				return nil, fmt.Errorf("cannot subscribe to %s, server does not support shared subscriptions", t)
+			}
+		}
+	}
+
+	c.debug.Printf("subscribing to %+v", s.Subscriptions)
+
+	subCtx, cf := context.WithTimeout(ctx, c.PacketTimeout)
+	defer cf()
+	cpCtx := &CPContext{subCtx, make(chan packets.ControlPacket, 1)}
+
+	sp := s.Packet()
+
+	mid, err := c.MIDs.Request(cpCtx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.MIDs.Free(mid)
+	sp.PacketID = mid
+
+	c.debug.Println("sending SUBSCRIBE")
+	if _, err := sp.WriteTo(c.Conn); err != nil {
+		return nil, err
+	}
+	c.debug.Println("waiting for SUBACK")
+	var sap packets.ControlPacket
+
+	select {
+	case <-subCtx.Done():
+		if ctxErr := subCtx.Err(); ctxErr != nil {
+			c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr))
+			return nil, ctxErr
+		}
+	case sap = <-cpCtx.Return:
+	}
+
+	if sap.Type != packets.SUBACK {
+		return nil, fmt.Errorf("received %d instead of Suback", sap.Type)
+	}
+	c.debug.Println("received SUBACK")
+
+	sa := SubackFromPacketSuback(sap.Content.(*packets.Suback))
+	switch {
+	case len(sa.Reasons) == 1:
+		if sa.Reasons[0] >= 0x80 {
+			var reason string
+			c.debug.Println("received an error code in Suback:", sa.Reasons[0])
+			if sa.Properties != nil {
+				reason = sa.Properties.ReasonString
+			}
+			return sa, fmt.Errorf("failed to subscribe to topic: %s", reason)
+		}
+	default:
+		for _, code := range sa.Reasons {
+			if code >= 0x80 {
+				c.debug.Println("received an error code in Suback:", code)
+				return sa, fmt.Errorf("at least one requested subscription failed")
+			}
+		}
+	}
+
+	return sa, nil
+}
+
+// Unsubscribe is used to send an Unsubscribe request to the MQTT server.
+// It is passed a pre-prepared Unsubscribe packet and blocks waiting for
+// a response Unsuback, or for the timeout to fire. Any response Unsuback
+// is returned from the function, along with any errors.
+func (c *Client) Unsubscribe(ctx context.Context, u *Unsubscribe) (*Unsuback, error) {
+	c.debug.Printf("unsubscribing from %+v", u.Topics)
+	unsubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout)
+	defer cf()
+	cpCtx := &CPContext{unsubCtx, make(chan packets.ControlPacket, 1)}
+
+	up := u.Packet()
+
+	mid, err := c.MIDs.Request(cpCtx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.MIDs.Free(mid)
+	up.PacketID = mid
+
+	c.debug.Println("sending UNSUBSCRIBE")
+	if _, err := up.WriteTo(c.Conn); err != nil {
+		return nil, err
+	}
+	c.debug.Println("waiting for UNSUBACK")
+	var uap packets.ControlPacket
+
+	select {
+	case <-unsubCtx.Done():
+		if ctxErr := unsubCtx.Err(); ctxErr != nil {
+			c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr))
+			return nil, ctxErr
+		}
+	case uap = <-cpCtx.Return:
+	}
+
+	if uap.Type != packets.UNSUBACK {
+		return nil, fmt.Errorf("received %d instead of Unsuback", uap.Type)
+	}
+	c.debug.Println("received UNSUBACK")
+
+	ua := UnsubackFromPacketUnsuback(uap.Content.(*packets.Unsuback))
+	switch {
+	case len(ua.Reasons) == 1:
+		if ua.Reasons[0] >= 0x80 {
+			var reason string
+			c.debug.Println("received an error code in Unsuback:", ua.Reasons[0])
+			if ua.Properties != nil {
+				reason = ua.Properties.ReasonString
+			}
+			return ua, fmt.Errorf("failed to unsubscribe from topic: %s", reason)
+		}
+	default:
+		for _, code := range ua.Reasons {
+			if code >= 0x80 {
+				c.debug.Println("received an error code in Unsuback:", code)
+				return ua, fmt.Errorf("at least one requested unsubscribe failed")
+			}
+		}
+	}
+
+	return ua, nil
+}
+
+// Publish is used to send a publication to the MQTT server.
+// It is passed a pre-prepared Publish packet and blocks waiting for
+// the appropriate response, or for the timeout to fire.
+// Any response message is returned from the function, along with any errors.
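Continuing the same sketch (topic and payload are assumptions), a QoS 1 publish blocks until the matching Puback arrives or PacketTimeout expires:

	pr, err := c.Publish(context.Background(), &paho.Publish{
		Topic:   "sensors/device1/temperature", // hypothetical topic
		QoS:     1,
		Payload: []byte("21.5"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("puback reason code:", pr.ReasonCode)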
+func (c *Client) Publish(ctx context.Context, p *Publish) (*PublishResponse, error) { + if p.QoS > c.serverProps.MaximumQoS { + return nil, fmt.Errorf("cannot send Publish with QoS %d, server maximum QoS is %d", p.QoS, c.serverProps.MaximumQoS) + } + if p.Properties != nil && p.Properties.TopicAlias != nil { + if c.serverProps.TopicAliasMaximum > 0 && *p.Properties.TopicAlias > c.serverProps.TopicAliasMaximum { + return nil, fmt.Errorf("cannot send publish with TopicAlias %d, server topic alias maximum is %d", *p.Properties.TopicAlias, c.serverProps.TopicAliasMaximum) + } + } + if !c.serverProps.RetainAvailable && p.Retain { + return nil, fmt.Errorf("cannot send Publish with retain flag set, server does not support retained messages") + } + if (p.Properties == nil || p.Properties.TopicAlias == nil) && p.Topic == "" { + return nil, fmt.Errorf("cannot send a publish with no TopicAlias and no Topic set") + } + + if c.ClientConfig.PublishHook != nil { + c.ClientConfig.PublishHook(p) + } + + c.debug.Printf("sending message to %s", p.Topic) + + pb := p.Packet() + + switch p.QoS { + case 0: + c.debug.Println("sending QoS0 message") + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + return nil, nil + case 1, 2: + return c.publishQoS12(ctx, pb) + } + + return nil, fmt.Errorf("QoS isn't 0, 1 or 2") +} + +func (c *Client) publishQoS12(ctx context.Context, pb *packets.Publish) (*PublishResponse, error) { + c.debug.Println("sending QoS12 message") + pubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + if err := c.serverInflight.Acquire(pubCtx, 1); err != nil { + return nil, err + } + defer c.serverInflight.Release(1) + cpCtx := &CPContext{pubCtx, make(chan packets.ControlPacket, 1)} + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + pb.PacketID = mid + + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + var resp packets.ControlPacket + + select { + case <-pubCtx.Done(): + if ctxErr := pubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case resp = <-cpCtx.Return: + } + + switch pb.QoS { + case 1: + if resp.Type != packets.PUBACK { + return nil, fmt.Errorf("received %d instead of PUBACK", resp.Type) + } + + pr := PublishResponseFromPuback(resp.Content.(*packets.Puback)) + if pr.ReasonCode >= 0x80 { + c.debug.Println("received an error code in Puback:", pr.ReasonCode) + return pr, fmt.Errorf("error publishing: %s", resp.Content.(*packets.Puback).Reason()) + } + return pr, nil + case 2: + switch resp.Type { + case packets.PUBCOMP: + pr := PublishResponseFromPubcomp(resp.Content.(*packets.Pubcomp)) + return pr, nil + case packets.PUBREC: + c.debug.Printf("received PUBREC for %s (must have errored)", pb.PacketID) + pr := PublishResponseFromPubrec(resp.Content.(*packets.Pubrec)) + return pr, nil + default: + return nil, fmt.Errorf("received %d instead of PUBCOMP", resp.Type) + } + } + + c.debug.Println("ended up with a non QoS1/2 message:", pb.QoS) + return nil, fmt.Errorf("ended up with a non QoS1/2 message: %d", pb.QoS) +} + +func (c *Client) expectConnack(packet chan<- *packets.Connack, errs chan<- error) { + recv, err := packets.ReadPacket(c.Conn) + if err != nil { + errs <- err + return + } + switch r := recv.Content.(type) { + case *packets.Connack: + c.debug.Println("received CONNACK") + if r.ReasonCode == packets.ConnackSuccess && r.Properties != nil && r.Properties.AuthMethod != "" { + // Successful connack and 
AuthMethod is defined, must have successfully authed during connect + go c.AuthHandler.Authenticated() + } + packet <- r + case *packets.Auth: + c.debug.Println("received AUTH") + if c.AuthHandler == nil { + errs <- fmt.Errorf("enhanced authentication flow started but no AuthHandler configured") + return + } + c.debug.Println("sending AUTH") + _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(r)).Packet().WriteTo(c.Conn) + if err != nil { + errs <- fmt.Errorf("error sending authentication packet: %w", err) + return + } + // go round again, either another AUTH or CONNACK + go c.expectConnack(packet, errs) + default: + errs <- fmt.Errorf("received unexpected packet %v", recv.Type) + } + +} + +// Disconnect is used to send a Disconnect packet to the MQTT server +// Whether or not the attempt to send the Disconnect packet fails +// (and if it does this function returns any error) the network connection +// is closed. +func (c *Client) Disconnect(d *Disconnect) error { + c.debug.Println("disconnecting") + _, err := d.Packet().WriteTo(c.Conn) + + c.close() + c.workers.Wait() + + return err +} + +// SetDebugLogger takes an instance of the paho Logger interface +// and sets it to be used by the debug log endpoint +func (c *Client) SetDebugLogger(l Logger) { + c.debug = l +} + +// SetErrorLogger takes an instance of the paho Logger interface +// and sets it to be used by the error log endpoint +func (c *Client) SetErrorLogger(l Logger) { + c.errors = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go new file mode 100644 index 000000000..6ccef9b47 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go @@ -0,0 +1,92 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Auth is a representation of the MQTT Auth packet + Auth struct { + Properties *AuthProperties + ReasonCode byte + } + + // AuthProperties is a struct of the properties that can be set + // for a Auth packet + AuthProperties struct { + AuthData []byte + AuthMethod string + ReasonString string + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Auth on +// which it is called +func (a *Auth) InitProperties(p *packets.Properties) { + a.Properties = &AuthProperties{ + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// AuthFromPacketAuth takes a packets library Auth and +// returns a paho library Auth +func AuthFromPacketAuth(a *packets.Auth) *Auth { + v := &Auth{ReasonCode: a.ReasonCode} + v.InitProperties(a.Properties) + + return v +} + +// Packet returns a packets library Auth from the paho Auth +// on which it is called +func (a *Auth) Packet() *packets.Auth { + v := &packets.Auth{ReasonCode: a.ReasonCode} + + if a.Properties != nil { + v.Properties = &packets.Properties{ + AuthMethod: a.Properties.AuthMethod, + AuthData: a.Properties.AuthData, + ReasonString: a.Properties.ReasonString, + User: a.Properties.User.ToPacketProperties(), + } + } + + return v +} + +// AuthResponse is a represenation of the response to an Auth +// packet +type AuthResponse struct { + Properties *AuthProperties + ReasonCode byte + Success bool +} + +// AuthResponseFromPacketAuth takes a packets library Auth and +// returns a paho library AuthResponse +func AuthResponseFromPacketAuth(a *packets.Auth) *AuthResponse { + return &AuthResponse{ + Success: 
true, + ReasonCode: a.ReasonCode, + Properties: &AuthProperties{ + ReasonString: a.Properties.ReasonString, + User: UserPropertiesFromPacketUser(a.Properties.User), + }, + } +} + +// AuthResponseFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library AuthResponse +func AuthResponseFromPacketDisconnect(d *packets.Disconnect) *AuthResponse { + return &AuthResponse{ + Success: true, + ReasonCode: d.ReasonCode, + Properties: &AuthProperties{ + ReasonString: d.Properties.ReasonString, + User: UserPropertiesFromPacketUser(d.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go new file mode 100644 index 000000000..9c7233618 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go @@ -0,0 +1,84 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connack is a representation of the MQTT Connack packet + Connack struct { + Properties *ConnackProperties + ReasonCode byte + SessionPresent bool + } + + // ConnackProperties is a struct of the properties that can be set + // for a Connack packet + ConnackProperties struct { + SessionExpiryInterval *uint32 + AuthData []byte + AuthMethod string + ResponseInfo string + ServerReference string + ReasonString string + AssignedClientID string + MaximumPacketSize *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + ServerKeepAlive *uint16 + MaximumQoS *byte + User UserProperties + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + RetainAvailable bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connack on +// which it is called +func (c *Connack) InitProperties(p *packets.Properties) { + c.Properties = &ConnackProperties{ + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + RetainAvailable: true, + ResponseInfo: p.ResponseInfo, + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQoS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.WildcardSubAvailable != nil { + c.Properties.WildcardSubAvailable = *p.WildcardSubAvailable == 1 + } + if p.SubIDAvailable != nil { + c.Properties.SubIDAvailable = *p.SubIDAvailable == 1 + } + if p.SharedSubAvailable != nil { + c.Properties.SharedSubAvailable = *p.SharedSubAvailable == 1 + } + if p.RetainAvailable != nil { + c.Properties.RetainAvailable = *p.RetainAvailable == 1 + } +} + +// ConnackFromPacketConnack takes a packets library Connack and +// returns a paho library Connack +func ConnackFromPacketConnack(c *packets.Connack) *Connack { + v := &Connack{ + SessionPresent: c.SessionPresent, + ReasonCode: c.ReasonCode, + } + v.InitProperties(c.Properties) + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go new file mode 100644 index 000000000..8d731764d --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go @@ -0,0 +1,180 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connect is a representation of the MQTT 
Connect packet + Connect struct { + Password []byte + Username string + ClientID string + Properties *ConnectProperties + WillMessage *WillMessage + WillProperties *WillProperties + KeepAlive uint16 + CleanStart bool + UsernameFlag bool + PasswordFlag bool + } + + // ConnectProperties is a struct of the properties that can be set + // for a Connect packet + ConnectProperties struct { + AuthData []byte + AuthMethod string + SessionExpiryInterval *uint32 + WillDelayInterval *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + MaximumQOS *byte + MaximumPacketSize *uint32 + User UserProperties + RequestProblemInfo bool + RequestResponseInfo bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connect on +// which it is called +func (c *Connect) InitProperties(p *packets.Properties) { + c.Properties = &ConnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: false, + RequestProblemInfo: true, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQOS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.RequestResponseInfo != nil { + c.Properties.RequestResponseInfo = *p.RequestProblemInfo == 1 + } + if p.RequestProblemInfo != nil { + c.Properties.RequestProblemInfo = *p.RequestProblemInfo == 1 + } +} + +// InitWillProperties is a function that takes a lower level +// Properties struct and completes the properties of the Will in the Connect on +// which it is called +func (c *Connect) InitWillProperties(p *packets.Properties) { + c.WillProperties = &WillProperties{ + WillDelayInterval: p.WillDelayInterval, + PayloadFormat: p.PayloadFormat, + MessageExpiry: p.MessageExpiry, + ContentType: p.ContentType, + ResponseTopic: p.ResponseTopic, + CorrelationData: p.CorrelationData, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// ConnectFromPacketConnect takes a packets library Connect and +// returns a paho library Connect +func ConnectFromPacketConnect(p *packets.Connect) *Connect { + v := &Connect{ + UsernameFlag: p.UsernameFlag, + Username: p.Username, + PasswordFlag: p.PasswordFlag, + Password: p.Password, + ClientID: p.ClientID, + CleanStart: p.CleanStart, + KeepAlive: p.KeepAlive, + } + v.InitProperties(p.Properties) + if p.WillFlag { + v.WillMessage = &WillMessage{ + Retain: p.WillRetain, + QoS: p.WillQOS, + Topic: p.WillTopic, + Payload: p.WillMessage, + } + v.InitWillProperties(p.WillProperties) + } + + return v +} + +// Packet returns a packets library Connect from the paho Connect +// on which it is called +func (c *Connect) Packet() *packets.Connect { + v := &packets.Connect{ + UsernameFlag: c.UsernameFlag, + Username: c.Username, + PasswordFlag: c.PasswordFlag, + Password: c.Password, + ClientID: c.ClientID, + CleanStart: c.CleanStart, + KeepAlive: c.KeepAlive, + } + + if c.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + AuthMethod: c.Properties.AuthMethod, + AuthData: c.Properties.AuthData, + WillDelayInterval: c.Properties.WillDelayInterval, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + MaximumQOS: c.Properties.MaximumQOS, + MaximumPacketSize: c.Properties.MaximumPacketSize, + User: c.Properties.User.ToPacketProperties(), + } + if 
c.Properties.RequestResponseInfo { + v.Properties.RequestResponseInfo = Byte(1) + } + if !c.Properties.RequestProblemInfo { + v.Properties.RequestProblemInfo = Byte(0) + } + } + + if c.WillMessage != nil { + v.WillFlag = true + v.WillQOS = c.WillMessage.QoS + v.WillTopic = c.WillMessage.Topic + v.WillRetain = c.WillMessage.Retain + v.WillMessage = c.WillMessage.Payload + if c.WillProperties != nil { + v.WillProperties = &packets.Properties{ + WillDelayInterval: c.WillProperties.WillDelayInterval, + PayloadFormat: c.WillProperties.PayloadFormat, + MessageExpiry: c.WillProperties.MessageExpiry, + ContentType: c.WillProperties.ContentType, + ResponseTopic: c.WillProperties.ResponseTopic, + CorrelationData: c.WillProperties.CorrelationData, + User: c.WillProperties.User.ToPacketProperties(), + } + } + } + + return v +} + +type ( + // WillMessage is a representation of the LWT message that can + // be sent with the Connect packet + WillMessage struct { + Retain bool + QoS byte + Topic string + Payload []byte + } + + // WillProperties is a struct of the properties that can be set + // for a Will in a Connect packet + WillProperties struct { + WillDelayInterval *uint32 + PayloadFormat *byte + MessageExpiry *uint32 + ContentType string + ResponseTopic string + CorrelationData []byte + User UserProperties + } +) diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go new file mode 100644 index 000000000..5caa85b14 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go @@ -0,0 +1,58 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Disconnect is a representation of the MQTT Disconnect packet + Disconnect struct { + Properties *DisconnectProperties + ReasonCode byte + } + + // DisconnectProperties is a struct of the properties that can be set + // for a Disconnect packet + DisconnectProperties struct { + ServerReference string + ReasonString string + SessionExpiryInterval *uint32 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Disconnect on +// which it is called +func (d *Disconnect) InitProperties(p *packets.Properties) { + d.Properties = &DisconnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// DisconnectFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library Disconnect +func DisconnectFromPacketDisconnect(p *packets.Disconnect) *Disconnect { + v := &Disconnect{ReasonCode: p.ReasonCode} + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Disconnect from the paho Disconnect +// on which it is called +func (d *Disconnect) Packet() *packets.Disconnect { + v := &packets.Disconnect{ReasonCode: d.ReasonCode} + + if d.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: d.Properties.SessionExpiryInterval, + ServerReference: d.Properties.ServerReference, + ReasonString: d.Properties.ReasonString, + User: d.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go new file mode 100644 index 000000000..1bb9654b3 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go @@ -0,0 +1,123 @@ 
+package paho + +import ( + "bytes" + "fmt" + + "github.com/eclipse/paho.golang/packets" +) + +type ( + // Publish is a representation of the MQTT Publish packet + Publish struct { + PacketID uint16 + QoS byte + Retain bool + Topic string + Properties *PublishProperties + Payload []byte + } + + // PublishProperties is a struct of the properties that can be set + // for a Publish packet + PublishProperties struct { + CorrelationData []byte + ContentType string + ResponseTopic string + PayloadFormat *byte + MessageExpiry *uint32 + SubscriptionIdentifier *int + TopicAlias *uint16 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Publish on +// which it is called +func (p *Publish) InitProperties(prop *packets.Properties) { + p.Properties = &PublishProperties{ + PayloadFormat: prop.PayloadFormat, + MessageExpiry: prop.MessageExpiry, + ContentType: prop.ContentType, + ResponseTopic: prop.ResponseTopic, + CorrelationData: prop.CorrelationData, + TopicAlias: prop.TopicAlias, + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PublishFromPacketPublish takes a packets library Publish and +// returns a paho library Publish +func PublishFromPacketPublish(p *packets.Publish) *Publish { + v := &Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Publish from the paho Publish +// on which it is called +func (p *Publish) Packet() *packets.Publish { + v := &packets.Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + if p.Properties != nil { + v.Properties = &packets.Properties{ + PayloadFormat: p.Properties.PayloadFormat, + MessageExpiry: p.Properties.MessageExpiry, + ContentType: p.Properties.ContentType, + ResponseTopic: p.Properties.ResponseTopic, + CorrelationData: p.Properties.CorrelationData, + TopicAlias: p.Properties.TopicAlias, + SubscriptionIdentifier: p.Properties.SubscriptionIdentifier, + User: p.Properties.User.ToPacketProperties(), + } + } + + return v +} + +func (p *Publish) String() string { + var b bytes.Buffer + + fmt.Fprintf(&b, "topic: %s qos: %d retain: %t\n", p.Topic, p.QoS, p.Retain) + if p.Properties.PayloadFormat != nil { + fmt.Fprintf(&b, "PayloadFormat: %v\n", p.Properties.PayloadFormat) + } + if p.Properties.MessageExpiry != nil { + fmt.Fprintf(&b, "MessageExpiry: %v\n", p.Properties.MessageExpiry) + } + if p.Properties.ContentType != "" { + fmt.Fprintf(&b, "ContentType: %v\n", p.Properties.ContentType) + } + if p.Properties.ResponseTopic != "" { + fmt.Fprintf(&b, "ResponseTopic: %v\n", p.Properties.ResponseTopic) + } + if p.Properties.CorrelationData != nil { + fmt.Fprintf(&b, "CorrelationData: %v\n", p.Properties.CorrelationData) + } + if p.Properties.TopicAlias != nil { + fmt.Fprintf(&b, "TopicAlias: %d\n", p.Properties.TopicAlias) + } + if p.Properties.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "SubscriptionIdentifier: %v\n", p.Properties.SubscriptionIdentifier) + } + for _, v := range p.Properties.User { + fmt.Fprintf(&b, "User: %s : %s\n", v.Key, v.Value) + } + b.WriteString(string(p.Payload)) + + return b.String() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go new file mode 100644 index 000000000..0c4e174aa --- 
/dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go @@ -0,0 +1,55 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // PublishResponse is a generic representation of a response + // to a QoS1 or QoS2 Publish + PublishResponse struct { + Properties *PublishResponseProperties + ReasonCode byte + } + + // PublishResponseProperties is the properties associated with + // a response to a QoS1 or QoS2 Publish + PublishResponseProperties struct { + ReasonString string + User UserProperties + } +) + +// PublishResponseFromPuback takes a packets library Puback and +// returns a paho library PublishResponse +func PublishResponseFromPuback(pa *packets.Puback) *PublishResponse { + return &PublishResponse{ + ReasonCode: pa.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pa.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pa.Properties.User), + }, + } +} + +// PublishResponseFromPubcomp takes a packets library Pubcomp and +// returns a paho library PublishResponse +func PublishResponseFromPubcomp(pc *packets.Pubcomp) *PublishResponse { + return &PublishResponse{ + ReasonCode: pc.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pc.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pc.Properties.User), + }, + } +} + +// PublishResponseFromPubrec takes a packets library Pubrec and +// returns a paho library PublishResponse +func PublishResponseFromPubrec(pr *packets.Pubrec) *PublishResponse { + return &PublishResponse{ + ReasonCode: pr.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pr.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pr.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go new file mode 100644 index 000000000..c1034c26c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Suback is a representation of an MQTT suback packet + Suback struct { + Properties *SubackProperties + Reasons []byte + } + + // SubackProperties is a struct of the properties that can be set + // for a Suback packet + SubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Suback from the paho Suback +// on which it is called +func (s *Suback) Packet() *packets.Suback { + return &packets.Suback{ + Reasons: s.Reasons, + Properties: &packets.Properties{ + User: s.Properties.User.ToPacketProperties(), + }, + } +} + +// SubackFromPacketSuback takes a packets library Suback and +// returns a paho library Suback +func SubackFromPacketSuback(s *packets.Suback) *Suback { + return &Suback{ + Reasons: s.Reasons, + Properties: &SubackProperties{ + ReasonString: s.Properties.ReasonString, + User: UserPropertiesFromPacketUser(s.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go new file mode 100644 index 000000000..e111f0cf6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go @@ -0,0 +1,67 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Subscribe is a representation of a MQTT subscribe packet + Subscribe struct { + Properties *SubscribeProperties + Subscriptions map[string]SubscribeOptions + } + + // SubscribeOptions is the struct 
representing the options for a subscription + SubscribeOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool + } +) + +// SubscribeProperties is a struct of the properties that can be set +// for a Subscribe packet +type SubscribeProperties struct { + SubscriptionIdentifier *int + User UserProperties +} + +// InitProperties is a function that takes a packet library +// Properties struct and completes the properties of the Subscribe on +// which it is called +func (s *Subscribe) InitProperties(prop *packets.Properties) { + s.Properties = &SubscribeProperties{ + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PacketSubOptionsFromSubscribeOptions returns a map of string to packet +// library SubOptions for the paho Subscribe on which it is called +func (s *Subscribe) PacketSubOptionsFromSubscribeOptions() map[string]packets.SubOptions { + r := make(map[string]packets.SubOptions) + for k, v := range s.Subscriptions { + r[k] = packets.SubOptions{ + QoS: v.QoS, + NoLocal: v.NoLocal, + RetainAsPublished: v.RetainAsPublished, + RetainHandling: v.RetainHandling, + } + } + + return r +} + +// Packet returns a packets library Subscribe from the paho Subscribe +// on which it is called +func (s *Subscribe) Packet() *packets.Subscribe { + v := &packets.Subscribe{Subscriptions: s.PacketSubOptionsFromSubscribeOptions()} + + if s.Properties != nil { + v.Properties = &packets.Properties{ + SubscriptionIdentifier: s.Properties.SubscriptionIdentifier, + User: s.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go new file mode 100644 index 000000000..15ca83885 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsuback is a representation of an MQTT Unsuback packet + Unsuback struct { + Reasons []byte + Properties *UnsubackProperties + } + + // UnsubackProperties is a struct of the properties that can be set + // for a Unsuback packet + UnsubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Unsuback from the paho Unsuback +// on which it is called +func (u *Unsuback) Packet() *packets.Unsuback { + return &packets.Unsuback{ + Reasons: u.Reasons, + Properties: &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + }, + } +} + +// UnsubackFromPacketUnsuback takes a packets library Unsuback and +// returns a paho library Unsuback +func UnsubackFromPacketUnsuback(u *packets.Unsuback) *Unsuback { + return &Unsuback{ + Reasons: u.Reasons, + Properties: &UnsubackProperties{ + ReasonString: u.Properties.ReasonString, + User: UserPropertiesFromPacketUser(u.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go new file mode 100644 index 000000000..375b917c8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go @@ -0,0 +1,31 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsubscribe is a representation of an MQTT unsubscribe packet + Unsubscribe struct { + Topics []string + Properties *UnsubscribeProperties + } + + // UnsubscribeProperties is a struct of the properties that can be set + // for a Unsubscribe 
packet + UnsubscribeProperties struct { + User UserProperties + } +) + +// Packet returns a packets library Unsubscribe from the paho Unsubscribe +// on which it is called +func (u *Unsubscribe) Packet() *packets.Unsubscribe { + v := &packets.Unsubscribe{Topics: u.Topics} + + if u.Properties != nil { + v.Properties = &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go new file mode 100644 index 000000000..2d7995f5c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go @@ -0,0 +1,100 @@ +package paho + +import ( + "github.com/eclipse/paho.golang/packets" +) + +// UserProperty is a struct for the user provided values +// permitted in the properties section +type UserProperty struct { + Key, Value string +} + +// UserProperties is a slice of UserProperty +type UserProperties []UserProperty + +// Add is a helper function for easily adding a new user property +func (u *UserProperties) Add(key, value string) *UserProperties { + *u = append(*u, UserProperty{key, value}) + + return u +} + +// Get returns the first entry in the UserProperties that matches +// key, or an empty string if the key is not found. Note that it is +// permitted to have multiple entries with the same key, use GetAll +// if it is expected to have multiple matches +func (u UserProperties) Get(key string) string { + for _, v := range u { + if v.Key == key { + return v.Value + } + } + + return "" +} + +// GetAll returns a slice of all entries in the UserProperties +// that match key, or a nil slice if none were found. +func (u UserProperties) GetAll(key string) []string { + var ret []string + for _, v := range u { + if v.Key == key { + ret = append(ret, v.Value) + } + } + + return ret +} + +// ToPacketProperties converts a UserProperties to a slice +// of packets.User which is used internally in the packets +// library for user properties +func (u UserProperties) ToPacketProperties() []packets.User { + ret := make([]packets.User, len(u)) + for i, v := range u { + ret[i] = packets.User{Key: v.Key, Value: v.Value} + } + + return ret +} + +// UserPropertiesFromPacketUser converts a slice of packets.User +// to an instance of UserProperties for easier consumption within +// the client library +func UserPropertiesFromPacketUser(up []packets.User) UserProperties { + ret := make(UserProperties, len(up)) + for i, v := range up { + ret[i] = UserProperty{v.Key, v.Value} + } + + return ret +} + +// Byte is a helper function that take a byte and returns +// a pointer to a byte of that value +func Byte(b byte) *byte { + return &b +} + +// Uint32 is a helper function that take a uint32 and returns +// a pointer to a uint32 of that value +func Uint32(u uint32) *uint32 { + return &u +} + +// Uint16 is a helper function that take a uint16 and returns +// a pointer to a uint16 of that value +func Uint16(u uint16) *uint16 { + return &u +} + +// BoolToByte is a helper function that take a bool and returns +// a pointer to a byte of value 1 if true or 0 if false +func BoolToByte(b bool) *byte { + var v byte + if b { + v = 1 + } + return &v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/message_ids.go b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go new file mode 100644 index 000000000..58b03e324 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go @@ -0,0 +1,93 @@ +package paho + +import ( + "context" + "errors" + 
"sync" + + "github.com/eclipse/paho.golang/packets" +) + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +// ErrorMidsExhausted is returned from Request() when there are no +// free message ids to be used. +var ErrorMidsExhausted = errors.New("all message ids in use") + +// MIDService defines the interface for a struct that handles the +// relationship between message ids and CPContexts +// Request() takes a *CPContext and returns a uint16 that is the +// messageid that should be used by the code that called Request() +// Get() takes a uint16 that is a messageid and returns the matching +// *CPContext that the MIDService has associated with that messageid +// Free() takes a uint16 that is a messageid and instructs the MIDService +// to mark that messageid as available for reuse +// Clear() resets the internal state of the MIDService +type MIDService interface { + Request(*CPContext) (uint16, error) + Get(uint16) *CPContext + Free(uint16) + Clear() +} + +// CPContext is the struct that is used to return responses to +// ControlPackets that have them, eg: the suback to a subscribe. +// The response packet is send down the Return channel and the +// Context is used to track timeouts. +type CPContext struct { + Context context.Context + Return chan packets.ControlPacket +} + +// MIDs is the default MIDService provided by this library. +// It uses a map of uint16 to *CPContext to track responses +// to messages with a messageid +type MIDs struct { + sync.Mutex + lastMid uint16 + index []*CPContext +} + +// Request is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Request(c *CPContext) (uint16, error) { + m.Lock() + defer m.Unlock() + for i := uint16(1); i < midMax; i++ { + v := (m.lastMid + i) % midMax + if v == 0 { + continue + } + if inuse := m.index[v]; inuse == nil { + m.index[v] = c + m.lastMid = v + return v, nil + } + } + return 0, ErrorMidsExhausted +} + +// Get is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Get(i uint16) *CPContext { + m.Lock() + defer m.Unlock() + return m.index[i] +} + +// Free is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Free(i uint16) { + m.Lock() + m.index[i] = nil + m.Unlock() +} + +// Clear is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Clear() { + m.index = make([]*CPContext, int(midMax)) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go new file mode 100644 index 000000000..d2d15704f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go @@ -0,0 +1,23 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type noopPersistence struct{} + +func (n *noopPersistence) Open() {} + +func (n *noopPersistence) Put(id uint16, cp packets.ControlPacket) {} + +func (n *noopPersistence) Get(id uint16) packets.ControlPacket { + return packets.ControlPacket{} +} + +func (n *noopPersistence) All() []packets.ControlPacket { + return nil +} + +func (n *noopPersistence) Delete(id uint16) {} + +func (n *noopPersistence) Close() {} + +func (n *noopPersistence) Reset() {} diff --git a/vendor/github.com/eclipse/paho.golang/paho/persistence.go b/vendor/github.com/eclipse/paho.golang/paho/persistence.go new file mode 100644 index 000000000..f02b846cc --- /dev/null +++ 
b/vendor/github.com/eclipse/paho.golang/paho/persistence.go
@@ -0,0 +1,98 @@
+package paho
+
+import (
+	"sync"
+
+	"github.com/eclipse/paho.golang/packets"
+)
+
+// Persistence is an interface of the functions for a struct
+// that is used to persist ControlPackets.
+// Open() is an initialiser to prepare the Persistence for use
+// Put() takes a uint16 which is a messageid and a ControlPacket
+// to persist against that messageid
+// Get() takes a uint16 which is a messageid and returns the
+// persisted ControlPacket from the Persistence for that messageid
+// All() returns a slice of all ControlPackets persisted
+// Delete() takes a uint16 which is a messageid and deletes the
+// associated stored ControlPacket from the Persistence
+// Close() closes the Persistence
+// Reset() clears the Persistence and prepares it to be reused
+type Persistence interface {
+	Open()
+	Put(uint16, packets.ControlPacket)
+	Get(uint16) packets.ControlPacket
+	All() []packets.ControlPacket
+	Delete(uint16)
+	Close()
+	Reset()
+}
+
+// MemoryPersistence is an implementation of a Persistence
+// that stores the ControlPackets in memory using a map
+type MemoryPersistence struct {
+	sync.RWMutex
+	packets map[uint16]packets.ControlPacket
+}
+
+// Open is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Open() {
+	m.Lock()
+	m.packets = make(map[uint16]packets.ControlPacket)
+	m.Unlock()
+}
+
+// Put is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Put(id uint16, cp packets.ControlPacket) {
+	m.Lock()
+	m.packets[id] = cp
+	m.Unlock()
+}
+
+// Get is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Get(id uint16) packets.ControlPacket {
+	m.RLock()
+	defer m.RUnlock()
+	return m.packets[id]
+}
+
+// All is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) All() []packets.ControlPacket {
+	m.RLock()
+	defer m.RUnlock()
+	ret := make([]packets.ControlPacket, 0, len(m.packets))
+
+	for _, cp := range m.packets {
+		ret = append(ret, cp)
+	}
+
+	return ret
+}
+
+// Delete is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Delete(id uint16) {
+	m.Lock()
+	delete(m.packets, id)
+	m.Unlock()
+}
+
+// Close is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Close() {
+	m.Lock()
+	m.packets = nil
+	m.Unlock()
+}
+
+// Reset is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Reset() {
+	m.Lock()
+	m.packets = make(map[uint16]packets.ControlPacket)
+	m.Unlock()
+}
diff --git a/vendor/github.com/eclipse/paho.golang/paho/pinger.go b/vendor/github.com/eclipse/paho.golang/paho/pinger.go
new file mode 100644
index 000000000..e135d25ac
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.golang/paho/pinger.go
@@ -0,0 +1,122 @@
+package paho
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/eclipse/paho.golang/packets"
+)
+
+// PingFailHandler is a type for the function that is invoked
+// when we have sent a Pingreq to the server and not received
+// a Pingresp within 1.5x our pingtimeout
+type PingFailHandler func(error)
+
+// Pinger is an interface of the functions for
a struct that is +// used to manage sending PingRequests and responding to +// PingResponses +// Start() takes a net.Conn which is a connection over which an +// MQTT session has already been established, and a time.Duration +// of the keepalive setting passed to the server when the MQTT +// session was established. +// Stop() is used to stop the Pinger +// PingResp() is the function that is called by the Client when +// a PingResponse is received +// SetDebug() is used to pass in a Logger to be used to log debug +// information, for example sharing a logger with the main client +type Pinger interface { + Start(net.Conn, time.Duration) + Stop() + PingResp() + SetDebug(Logger) +} + +// PingHandler is the library provided default Pinger +type PingHandler struct { + mu sync.Mutex + lastPing time.Time + conn net.Conn + stop chan struct{} + pingFailHandler PingFailHandler + pingOutstanding int32 + debug Logger +} + +// DefaultPingerWithCustomFailHandler returns an instance of the +// default Pinger but with a custom PingFailHandler that is called +// when the client has not received a response to a PingRequest +// within the appropriate amount of time +func DefaultPingerWithCustomFailHandler(pfh PingFailHandler) *PingHandler { + return &PingHandler{ + pingFailHandler: pfh, + debug: NOOPLogger{}, + } +} + +// Start is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Start(c net.Conn, pt time.Duration) { + p.mu.Lock() + p.conn = c + p.stop = make(chan struct{}) + p.mu.Unlock() + checkTicker := time.NewTicker(pt / 4) + defer checkTicker.Stop() + for { + select { + case <-p.stop: + return + case <-checkTicker.C: + if atomic.LoadInt32(&p.pingOutstanding) > 0 && time.Since(p.lastPing) > (pt+pt>>1) { + p.pingFailHandler(fmt.Errorf("ping resp timed out")) + //ping outstanding and not reset in 1.5 times ping timer + return + } + if time.Since(p.lastPing) >= pt { + //time to send a ping + if _, err := packets.NewControlPacket(packets.PINGREQ).WriteTo(p.conn); err != nil { + if p.pingFailHandler != nil { + p.pingFailHandler(err) + } + return + } + atomic.AddInt32(&p.pingOutstanding, 1) + p.lastPing = time.Now() + p.debug.Println("pingHandler sending ping request") + } + } + } +} + +// Stop is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Stop() { + p.mu.Lock() + defer p.mu.Unlock() + if p.stop == nil { + return + } + p.debug.Println("pingHandler stopping") + select { + case <-p.stop: + //Already stopped, do nothing + default: + close(p.stop) + } +} + +// PingResp is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) PingResp() { + p.debug.Println("pingHandler resetting pingOutstanding") + atomic.StoreInt32(&p.pingOutstanding, 0) +} + +// SetDebug sets the logger l to be used for printing debug +// information for the pinger +func (p *PingHandler) SetDebug(l Logger) { + p.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/router.go b/vendor/github.com/eclipse/paho.golang/paho/router.go new file mode 100644 index 000000000..05031596f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/router.go @@ -0,0 +1,212 @@ +package paho + +import ( + "strings" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// MessageHandler is a type for a function that is invoked +// by a Router when it has received a Publish. 
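As an illustrative sketch (the topic filter is an assumption), handlers are attached to a StandardRouter, which is then passed to the client through ClientConfig.Router as in the connection sketch earlier:

	router := paho.NewStandardRouter()
	router.RegisterHandler("sensors/#", func(p *paho.Publish) { // hypothetical filter
		log.Printf("message on %s: %s", p.Topic, string(p.Payload))
	})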
+type MessageHandler func(*Publish) + +// Router is an interface of the functions for a struct that is +// used to handle invoking MessageHandlers depending on the +// the topic the message was published on. +// RegisterHandler() takes a string of the topic, and a MessageHandler +// to be invoked when Publishes are received that match that topic +// UnregisterHandler() takes a string of the topic to remove +// MessageHandlers for +// Route() takes a Publish message and determines which MessageHandlers +// should be invoked +type Router interface { + RegisterHandler(string, MessageHandler) + UnregisterHandler(string) + Route(*packets.Publish) + SetDebugLogger(Logger) +} + +// StandardRouter is a library provided implementation of a Router that +// allows for unique and multiple MessageHandlers per topic +type StandardRouter struct { + sync.RWMutex + subscriptions map[string][]MessageHandler + aliases map[uint16]string + debug Logger +} + +// NewStandardRouter instantiates and returns an instance of a StandardRouter +func NewStandardRouter() *StandardRouter { + return &StandardRouter{ + subscriptions: make(map[string][]MessageHandler), + aliases: make(map[uint16]string), + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided StandardRouter's +// implementation of the required interface function() +func (r *StandardRouter) RegisterHandler(topic string, h MessageHandler) { + r.debug.Println("registering handler for:", topic) + r.Lock() + defer r.Unlock() + + r.subscriptions[topic] = append(r.subscriptions[topic], h) +} + +// UnregisterHandler is the library provided StandardRouter's +// implementation of the required interface function() +func (r *StandardRouter) UnregisterHandler(topic string) { + r.debug.Println("unregistering handler for:", topic) + r.Lock() + defer r.Unlock() + + delete(r.subscriptions, topic) +} + +// Route is the library provided StandardRouter's implementation +// of the required interface function() +func (r *StandardRouter) Route(pb *packets.Publish) { + r.debug.Println("routing message for:", pb.Topic) + r.RLock() + defer r.RUnlock() + + m := PublishFromPacketPublish(pb) + + var topic string + if pb.Properties.TopicAlias != nil { + r.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + r.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + r.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := r.aliases[*pb.Properties.TopicAlias]; ok { + r.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + topic = t + } + } else { + topic = m.Topic + } + + for route, handlers := range r.subscriptions { + if match(route, topic) { + r.debug.Println("found handler for:", route) + for _, handler := range handlers { + handler(m) + } + } + } +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (r *StandardRouter) SetDebugLogger(l Logger) { + r.debug = l +} + +func match(route, topic string) bool { + return route == topic || routeIncludesTopic(route, topic) +} + +func matchDeep(route []string, topic []string) bool { + if len(route) == 0 { + return len(topic) == 0 + } + + if len(topic) == 0 { + return route[0] == "#" + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return matchDeep(route[1:], topic[1:]) + } + return false +} + +func routeIncludesTopic(route, topic string) bool { + return matchDeep(routeSplit(route), 
topicSplit(topic)) +} + +func routeSplit(route string) []string { + if len(route) == 0 { + return nil + } + var result []string + if strings.HasPrefix(route, "$share") { + result = strings.Split(route, "/")[2:] + } else { + result = strings.Split(route, "/") + } + return result +} + +func topicSplit(topic string) []string { + if len(topic) == 0 { + return nil + } + return strings.Split(topic, "/") +} + +// SingleHandlerRouter is a library provided implementation of a Router +// that stores only a single MessageHandler and invokes this MessageHandler +// for all received Publishes +type SingleHandlerRouter struct { + sync.Mutex + aliases map[uint16]string + handler MessageHandler + debug Logger +} + +// NewSingleHandlerRouter instantiates and returns an instance of a SingleHandlerRouter +func NewSingleHandlerRouter(h MessageHandler) *SingleHandlerRouter { + return &SingleHandlerRouter{ + aliases: make(map[uint16]string), + handler: h, + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) RegisterHandler(topic string, h MessageHandler) { + s.debug.Println("registering handler for:", topic) + s.handler = h +} + +// UnregisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) UnregisterHandler(topic string) {} + +// Route is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) Route(pb *packets.Publish) { + m := PublishFromPacketPublish(pb) + + s.debug.Println("routing message for:", m.Topic) + + if pb.Properties.TopicAlias != nil { + s.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + s.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + s.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := s.aliases[*pb.Properties.TopicAlias]; ok { + s.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + m.Topic = t + } + } + s.handler(m) +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (s *SingleHandlerRouter) SetDebugLogger(l Logger) { + s.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/trace.go b/vendor/github.com/eclipse/paho.golang/paho/trace.go new file mode 100644 index 000000000..586c92398 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/trace.go @@ -0,0 +1,22 @@ +package paho + +type ( + // Logger interface allows implementations to provide to this package any + // object that implements the methods defined in it. + Logger interface { + Println(v ...interface{}) + Printf(format string, v ...interface{}) + } + + // NOOPLogger implements the logger that does not perform any operation + // by default. This allows us to efficiently discard the unwanted messages. 
+ NOOPLogger struct{} +) + +// Println is the library provided NOOPLogger's +// implementation of the required interface function() +func (NOOPLogger) Println(v ...interface{}) {} + +// Printf is the library provided NOOPLogger's +// implementation of the required interface function(){} +func (NOOPLogger) Printf(format string, v ...interface{}) {} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go deleted file mode 100644 index 36432c56f..000000000 --- a/vendor/github.com/go-logr/logr/slogr/slogr.go +++ /dev/null @@ -1,61 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slogr enables usage of a slog.Handler with logr.Logger as front-end -// API and of a logr.LogSink through the slog.Handler and thus slog.Logger -// APIs. -// -// See the README in the top-level [./logr] package for a discussion of -// interoperability. -// -// Deprecated: use the main logr package instead. -package slogr - -import ( - "log/slog" - - "github.com/go-logr/logr" -) - -// NewLogr returns a logr.Logger which writes to the slog.Handler. -// -// Deprecated: use [logr.FromSlogHandler] instead. -func NewLogr(handler slog.Handler) logr.Logger { - return logr.FromSlogHandler(handler) -} - -// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. -// -// Deprecated: use [logr.ToSlogHandler] instead. -func NewSlogHandler(logger logr.Logger) slog.Handler { - return logr.ToSlogHandler(logger) -} - -// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. -// -// Deprecated: use [logr.ToSlogHandler] instead. -func ToSlogHandler(logger logr.Logger) slog.Handler { - return logr.ToSlogHandler(logger) -} - -// SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. -// -// Deprecated: use [logr.SlogSink] instead. -type SlogSink = logr.SlogSink diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go deleted file mode 100644 index c6f66f103..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONUnmarshalV2 = false - -// UnmarshalNext unmarshals the next JSON object from d into m. 
-func UnmarshalNext(d *json.Decoder, m proto.Message) error { - return new(Unmarshaler).UnmarshalNext(d, m) -} - -// Unmarshal unmarshals a JSON object from r into m. -func Unmarshal(r io.Reader, m proto.Message) error { - return new(Unmarshaler).Unmarshal(r, m) -} - -// UnmarshalString unmarshals a JSON object from s into m. -func UnmarshalString(s string, m proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // AllowUnknownFields specifies whether to allow messages to contain - // unknown JSON fields, as opposed to failing to unmarshal. - AllowUnknownFields bool - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize the way -// they are unmarshaled from JSON. Messages that implement this should also -// implement JSONPBMarshaler so that the custom format can be produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Unmarshal unmarshals a JSON object from r into m. -func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { - return u.UnmarshalNext(json.NewDecoder(r), m) -} - -// UnmarshalNext unmarshals the next JSON object from d into m. -func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { - if m == nil { - return errors.New("invalid nil message") - } - - // Parse the next JSON object from the stream. - raw := json.RawMessage{} - if err := d.Decode(&raw); err != nil { - return err - } - - // Check for custom unmarshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsu, ok := m.(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, raw) - } - - mr := proto.MessageReflect(m) - - // NOTE: For historical reasons, a top-level null is treated as a noop. - // This is incorrect, but kept for compatibility. - if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { - return nil - } - - if wrapJSONUnmarshalV2 { - // NOTE: If input message is non-empty, we need to preserve merge semantics - // of the old jsonpb implementation. These semantics are not supported by - // the protobuf JSON specification. - isEmpty := true - mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { - isEmpty = false // at least one iteration implies non-empty - return false - }) - if !isEmpty { - // Perform unmarshaling into a newly allocated, empty message. - mr = mr.New() - - // Use a defer to copy all unmarshaled fields into the original message. - dst := proto.MessageReflect(m) - defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - dst.Set(fd, v) - return true - }) - } - - // Unmarshal using the v2 JSON unmarshaler. 
- opts := protojson.UnmarshalOptions{ - DiscardUnknown: u.AllowUnknownFields, - } - if u.AnyResolver != nil { - opts.Resolver = anyResolver{u.AnyResolver} - } - return opts.Unmarshal(raw, mr.Interface()) - } else { - if err := u.unmarshalMessage(mr, raw); err != nil { - return err - } - return protoV2.CheckInitialized(mr.Interface()) - } -} - -func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { - md := m.Descriptor() - fds := md.Fields() - - if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, in) - } - - if string(in) == "null" && md.FullName() != "google.protobuf.Value" { - return nil - } - - switch wellKnownType(md.FullName()) { - case "Any": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - rawTypeURL, ok := jsonObject["@type"] - if !ok { - return errors.New("Any JSON doesn't have '@type'") - } - typeURL, err := unquoteString(string(rawTypeURL)) - if err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) - } - m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) - - var m2 protoreflect.Message - if u.AnyResolver != nil { - mi, err := u.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - if err == protoregistry.NotFound { - return fmt.Errorf("could not resolve Any message type: %v", typeURL) - } - return err - } - m2 = mt.New() - } - - if wellKnownType(m2.Descriptor().FullName()) != "" { - rawValue, ok := jsonObject["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - if err := u.unmarshalMessage(m2, rawValue); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } else { - delete(jsonObject, "@type") - rawJSON, err := json.Marshal(jsonObject) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - if err = u.unmarshalMessage(m2, rawJSON); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } - - rawWire, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) - return nil - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - v, err := u.unmarshalValue(m.NewField(fd), in, fd) - if err != nil { - return err - } - m.Set(fd, v) - return nil - case "Duration": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - d, err := time.ParseDuration(v) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - sec := d.Nanoseconds() / 1e9 - nsec := d.Nanoseconds() % 1e9 - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Timestamp": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - t, err := time.Parse(time.RFC3339Nano, v) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - sec := t.Unix() - nsec := t.Nanosecond() - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Value": - switch { - 
case string(in) == "null": - m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) - case string(in) == "true": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) - case string(in) == "false": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) - case hasPrefixAndSuffix('"', in, '"'): - s, err := unquoteString(string(in)) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) - case hasPrefixAndSuffix('[', in, ']'): - v := m.Mutable(fds.ByNumber(6)) - return u.unmarshalMessage(v.Message(), in) - case hasPrefixAndSuffix('{', in, '}'): - v := m.Mutable(fds.ByNumber(5)) - return u.unmarshalMessage(v.Message(), in) - default: - f, err := strconv.ParseFloat(string(in), 0) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) - } - return nil - case "ListValue": - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - lv := m.Mutable(fds.ByNumber(1)).List() - for _, raw := range jsonArray { - ve := lv.NewElement() - if err := u.unmarshalMessage(ve.Message(), raw); err != nil { - return err - } - lv.Append(ve) - } - return nil - case "Struct": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - mv := m.Mutable(fds.ByNumber(1)).Map() - for key, raw := range jsonObject { - kv := protoreflect.ValueOf(key).MapKey() - vv := mv.NewValue() - if err := u.unmarshalMessage(vv.Message(), raw); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) - } - mv.Set(kv, vv) - } - return nil - } - - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - // Handle known fields. - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if fd.IsWeak() && fd.Message().IsPlaceholder() { - continue // weak reference is not linked in - } - - // Search for any raw JSON value associated with this field. - var raw json.RawMessage - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - name = string(fd.JSONName()) - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - // Handle extension fields. - for name, raw := range jsonObject { - if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { - continue - } - - // Resolve the extension field by name. 
- xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(md) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - continue - } - delete(jsonObject, name) - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - if !u.AllowUnknownFields && len(jsonObject) > 0 { - for name := range jsonObject { - return fmt.Errorf("unknown field %q in %v", name, md.FullName()) - } - } - return nil -} - -func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { - if fd.Cardinality() == protoreflect.Repeated { - return false - } - if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" - } - if ed := fd.Enum(); ed != nil { - return ed.FullName() == "google.protobuf.NullValue" - } - return false -} - -func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { - if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { - _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) - return ok - } - return false -} - -func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch { - case fd.IsList(): - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return v, err - } - lv := v.List() - for _, raw := range jsonArray { - ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) - if err != nil { - return v, err - } - lv.Append(ve) - } - return v, nil - case fd.IsMap(): - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return v, err - } - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - for key, raw := range jsonObject { - var kv protoreflect.MapKey - if kfd.Kind() == protoreflect.StringKind { - kv = protoreflect.ValueOf(key).MapKey() - } else { - v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) - if err != nil { - return v, err - } - kv = v.MapKey() - } - - vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) - if err != nil { - return v, err - } - mv.Set(kv, vv) - } - return v, nil - default: - return u.unmarshalSingularValue(v, in, fd) - } -} - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(+1), - `"-Infinity"`: math.Inf(-1), -} - -func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch fd.Kind() { - case protoreflect.BoolKind: - return unmarshalValue(in, new(bool)) - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return unmarshalValue(trimQuote(in), new(int32)) - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return unmarshalValue(trimQuote(in), new(int64)) - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return unmarshalValue(trimQuote(in), new(uint32)) - case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: - return unmarshalValue(trimQuote(in), new(uint64)) - case protoreflect.FloatKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat32(float32(f)), nil - } - return unmarshalValue(trimQuote(in), new(float32)) - case protoreflect.DoubleKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat64(float64(f)), nil - } - return unmarshalValue(trimQuote(in), new(float64)) - case protoreflect.StringKind: - return unmarshalValue(in, new(string)) - case protoreflect.BytesKind: - return unmarshalValue(in, new([]byte)) - case protoreflect.EnumKind: - if hasPrefixAndSuffix('"', in, '"') { - vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) - if vd == nil { - return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) - } - return protoreflect.ValueOfEnum(vd.Number()), nil - } - return unmarshalValue(in, new(protoreflect.EnumNumber)) - case protoreflect.MessageKind, protoreflect.GroupKind: - err := u.unmarshalMessage(v.Message(), in) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } -} - -func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { - err := json.Unmarshal(in, v) - return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err -} - -func unquoteString(in string) (out string, err error) { - err = json.Unmarshal([]byte(in), &out) - return out, err -} - -func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { - if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { - return true - } - return false -} - -// trimQuote is like unquoteString but simply strips surrounding quotes. -// This is incorrect, but is behavior done by the legacy implementation. -func trimQuote(in []byte) []byte { - if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { - in = in[1 : len(in)-1] - } - return in -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go deleted file mode 100644 index e9438a93f..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONMarshalV2 = false - -// Marshaler is a configurable object for marshaling protocol buffer messages -// to the specified JSON representation. -type Marshaler struct { - // OrigName specifies whether to use the original protobuf name for fields. - OrigName bool - - // EnumsAsInts specifies whether to render enum values as integers, - // as opposed to string values. - EnumsAsInts bool - - // EmitDefaults specifies whether to render fields with zero values. - EmitDefaults bool - - // Indent controls whether the output is compact or not. - // If empty, the output is compact JSON. Otherwise, every JSON object - // entry and JSON array value will be on its own line. - // Each line will be preceded by repeated copies of Indent, where the - // number of copies is the current indentation depth. 
- Indent string - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should also -// implement JSONPBUnmarshaler so that the custom format can be parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// Marshal serializes a protobuf message as JSON into w. -func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { - b, err := jm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// MarshalToString serializes a protobuf message as JSON in string form. -func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { - b, err := jm.marshal(m) - if err != nil { - return "", err - } - return string(b), nil -} - -func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { - v := reflect.ValueOf(m) - if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, errors.New("Marshal called with nil") - } - - // Check for custom marshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsm, ok := m.(JSONPBMarshaler); ok { - return jsm.MarshalJSONPB(jm) - } - - if wrapJSONMarshalV2 { - opts := protojson.MarshalOptions{ - UseProtoNames: jm.OrigName, - UseEnumNumbers: jm.EnumsAsInts, - EmitUnpopulated: jm.EmitDefaults, - Indent: jm.Indent, - } - if jm.AnyResolver != nil { - opts.Resolver = anyResolver{jm.AnyResolver} - } - return opts.Marshal(proto.MessageReflect(m).Interface()) - } else { - // Check for unpopulated required fields first. - m2 := proto.MessageReflect(m) - if err := protoV2.CheckInitialized(m2.Interface()); err != nil { - return nil, err - } - - w := jsonWriter{Marshaler: jm} - err := w.marshalMessage(m2, "", "") - return w.buf, err - } -} - -type jsonWriter struct { - *Marshaler - buf []byte -} - -func (w *jsonWriter) write(s string) { - w.buf = append(w.buf, s...) -} - -func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { - if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(w.Marshaler) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - w.write(string(b)) - return nil - } - - md := m.Descriptor() - fds := md.Fields() - - // Handle well-known types. 
- const secondInNanos = int64(time.Second / time.Nanosecond) - switch wellKnownType(md.FullName()) { - case "Any": - return w.marshalAny(m, indent) - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - case "Duration": - const maxSecondsInDuration = 315576000000 - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if s < -maxSecondsInDuration || s > maxSecondsInDuration { - return fmt.Errorf("seconds out of range %v", s) - } - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - var sign string - if s < 0 || ns < 0 { - sign, s, ns = "-", -1*s, -1*ns - } - x := fmt.Sprintf("%s%d.%09d", sign, s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vs"`, x)) - return nil - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vZ"`, x)) - return nil - case "Value": - // JSON value; which is a null, number, string, bool, object, or array. - od := md.Oneofs().Get(0) - fd := m.WhichOneof(od) - if fd == nil { - return errors.New("nil Value") - } - return w.marshalValue(fd, m.Get(fd), indent) - case "Struct", "ListValue": - // JSON object or array. - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - - firstField := true - if typeURL != "" { - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue - } - } else { - i++ - } - - v := m.Get(fd) - - if !m.Has(fd) { - if !w.EmitDefaults || fd.ContainingOneof() != nil { - continue - } - if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { - v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars - } - } - - if !firstField { - w.writeComma() - } - if err := w.marshalField(fd, v, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if md.ExtensionRanges().Len() > 0 { - // Collect a sorted list of all extension descriptor and values. 
- type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - if !firstField { - w.writeComma() - } - if err := w.marshalField(ext.desc, ext.val, indent); err != nil { - return err - } - firstField = false - } - } - - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) writeComma() { - if w.Indent != "" { - w.write(",\n") - } else { - w.write(",") - } -} - -func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." - md := m.Descriptor() - typeURL := m.Get(md.Fields().ByNumber(1)).String() - rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() - - var m2 protoreflect.Message - if w.AnyResolver != nil { - mi, err := w.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - return err - } - m2 = mt.New() - } - - if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { - return err - } - - if wellKnownType(m2.Descriptor().FullName()) == "" { - return w.marshalMessage(m2, indent, typeURL) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - w.writeComma() - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - w.write(`"value": `) - } else { - w.write(`"value":`) - } - if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { - return err - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"@type":`) - if w.Indent != "" { - w.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - w.write(string(b)) - return nil -} - -// marshalField writes field description and value to the Writer. -func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"`) - switch { - case fd.IsExtension(): - // For message set, use the fname of the message as the extension name. 
- name := string(fd.FullName()) - if isMessageSet(fd.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - w.write("[" + name + "]") - case w.OrigName: - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - w.write(name) - default: - w.write(string(fd.JSONName())) - } - w.write(`":`) - if w.Indent != "" { - w.write(" ") - } - return w.marshalValue(fd, v, indent) -} - -func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case fd.IsList(): - w.write("[") - comma := "" - lv := v.List() - for i := 0; i < lv.Len(); i++ { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write("]") - return nil - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - - // Collect a sorted list of all map keys and values. - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - - w.write(`{`) - comma := "" - for _, entry := range entries { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - - s := fmt.Sprint(entry.key.Interface()) - b, err := json.Marshal(s) - if err != nil { - return err - } - w.write(string(b)) - - w.write(`:`) - if w.Indent != "" { - w.write(` `) - } - - if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write(`}`) - return nil - default: - return w.marshalSingularValue(fd, v, indent) - } -} - -func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case !v.IsValid(): - w.write("null") - return nil - case fd.Message() != nil: - return w.marshalMessage(v.Message(), indent+w.Indent, "") - case fd.Enum() != nil: - if fd.Enum().FullName() == "google.protobuf.NullValue" { - w.write("null") - return nil - } - - vd := fd.Enum().Values().ByNumber(v.Enum()) - if vd == nil || w.EnumsAsInts { - w.write(strconv.Itoa(int(v.Enum()))) - } else { - w.write(`"` + string(vd.Name()) + `"`) - } - return nil - default: - switch v.Interface().(type) { - case float32, float64: - switch { - case math.IsInf(v.Float(), +1): - w.write(`"Infinity"`) - return nil - case math.IsInf(v.Float(), -1): - w.write(`"-Infinity"`) - return nil - case math.IsNaN(v.Float()): - w.write(`"NaN"`) - 
return nil - } - case int64, uint64: - w.write(fmt.Sprintf(`"%d"`, v.Interface())) - return nil - } - - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - w.write(string(b)) - return nil - } -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go deleted file mode 100644 index 480e2448d..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/json.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jsonpb provides functionality to marshal and unmarshal between a -// protocol buffer message and JSON. It follows the specification at -// https://developers.google.com/protocol-buffers/docs/proto3#json. -// -// Do not rely on the default behavior of the standard encoding/json package -// when called on generated message types as it does not operate correctly. -// -// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" -// package instead. -package jsonpb - -import ( - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// AnyResolver takes a type URL, present in an Any message, -// and resolves it into an instance of the associated message. -type AnyResolver interface { - Resolve(typeURL string) (proto.Message, error) -} - -type anyResolver struct{ AnyResolver } - -func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - return r.FindMessageByURL(string(message)) -} - -func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { - m, err := r.Resolve(url) - if err != nil { - return nil, err - } - return protoimpl.X.MessageTypeOf(m), nil -} - -func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -func wellKnownType(s protoreflect.FullName) string { - if s.Parent() == "google.protobuf" { - switch s.Name() { - case "Empty", "Any", - "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue", - "Duration", "Timestamp", - "NullValue", "Struct", "Value", "ListValue": - return string(s.Name()) - } - } - return "" -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..16686a655 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. + +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60b..000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... 
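The empty.pb.go file added above is only a forwarding shim: it re-exports emptypb.Empty under the legacy import path. A minimal sketch of what that aliasing means for callers; the main package, import alias, and print call below are illustrative assumptions, not part of the vendored code:

package main

import (
	"fmt"

	legacy "github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// empty.Empty is a type alias for emptypb.Empty, so a value created via
	// the new package satisfies code written against the legacy import path.
	var e *legacy.Empty = &emptypb.Empty{}
	fmt.Printf("%T\n", e) // prints *emptypb.Empty
}
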
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 000000000..7ec5ac7ea --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f1..a502fdc51 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f9..3e9a61889 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. 
This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec..dc60082d3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc9..b2a0bc871 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc..c35112927 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. 
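The google/uuid hunks above add the all-ones Max constant and extend Time() to decode version 6 and 7 values. A rough usage sketch under those changes, using NewV7 from the version7.go file added later in this patch; the main package and print calls are assumptions for illustration only:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Max is the new special-form UUID with all 128 bits set, added in hash.go.
	fmt.Println(uuid.Max)

	// Time() is now defined for version 6 and 7 UUIDs in addition to 1 and 2,
	// so the Unix-epoch timestamp embedded in a v7 value can be recovered.
	v7, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	sec, nsec := v7.Time().UnixTime()
	fmt.Println(v7, sec, nsec)
}
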
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207aeb..5232b4867 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. 
+func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { @@ -292,3 +351,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 000000000..339a959a7 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 000000000..3167b643d --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fe72a7b18..9a14b8151 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,28 @@ +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + ## 1.30.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index c271a366a..5b46a1658 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.30.0" +const GOMEGA_VERSION = "1.31.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's 
fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bce..cde9e2ec8 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -553,7 +553,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 43f994374..8860d677f 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -394,7 +394,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb919..4e3897858 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml b/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml index 00d7adfcf..7096df34e 100644 --- a/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml +++ b/vendor/github.com/openshift/build-machinery-go/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-8-release-golang-1.17-openshift-4.10 + tag: rhel-9-release-golang-1.20-openshift-4.15 diff --git a/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker b/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker new file mode 100644 index 000000000..a6d656344 --- /dev/null +++ b/vendor/github.com/openshift/build-machinery-go/Dockerfile.commitchecker @@ -0,0 +1,13 @@ +# This Dockerfile must be on the top-level of this repo, because it needs to copy +# both commitchecker/ 
and make/ into the build container. + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.20-openshift-4.15 AS builder +WORKDIR /go/src/github.com/openshift/build-machinery-go +COPY . . +RUN make -C commitchecker + +FROM registry.ci.openshift.org/ocp/4.15:base +COPY --from=builder /go/src/github.com/openshift/build-machinery-go/commitchecker/commitchecker /usr/bin/ +RUN dnf install --setopt=tsflags=nodocs -y git && \ + dnf clean all && rm -rf /var/cache/yum/* +ENTRYPOINT ["/usr/bin/commitchecker"] diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index d33c8890f..000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index 495c1fa69..000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index e041da5ea..ec2202bd7 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -7,7 +7,10 @@ package poly1305 -import "encoding/binary" +import ( + "encoding/binary" + "math/bits" +) // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag // for a 64 bytes message is approximately @@ -114,13 +117,13 @@ type uint128 struct { } func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) + hi, lo := bits.Mul64(a, b) return uint128{lo, hi} } func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) if c != 0 { panic("poly1305: unexpected overflow") } @@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) { // hide leading zeroes. For full chunks, that's 1 << 128, so we can just // add 1 to the most significant (2¹²⁸) limb, h2. if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) h2 += c + 1 msg = msg[TagSize:] @@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) { copy(buf[:], msg) buf[len(msg)] = 1 - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) h2 += c msg = nil @@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) { m3 := h2r1 t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) // Now we have the result as 4 64-bit limbs, and we need to reduce it // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do @@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) { // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c cc = shiftRightBy2(cc) - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most @@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the // result if the subtraction underflows, and t otherwise. 
- hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) // h = h if h < p else h - p h0 = select64(b, h0, hMinusP0) @@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // // by just doing a wide addition with the 128 low bits of h and discarding // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) binary.LittleEndian.PutUint64(out[0:8], h0) binary.LittleEndian.PutUint64(out[8:16], h1) diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938a..3c57880d6 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90dc..e2b298d85 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). 
+ return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
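A hedged sketch of the RFC 8628 device-authorization flow enabled by DeviceAuth and DeviceAccessToken below (the endpoint URLs, client ID, and scope are placeholders, not values taken from this patch):

package main

import (
    "context"
    "fmt"
    "log"

    "golang.org/x/oauth2"
)

func main() {
    // Placeholder client registration; real values come from the authorization server.
    conf := &oauth2.Config{
        ClientID: "my-client-id",
        Scopes:   []string{"openid"},
        Endpoint: oauth2.Endpoint{
            DeviceAuthURL: "https://example.com/oauth/device/code",
            TokenURL:      "https://example.com/oauth/token",
        },
    }

    ctx := context.Background()

    // Request a device code and a user code.
    da, err := conf.DeviceAuth(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

    // Poll the token endpoint until the user approves or the code expires.
    tok, err := conf.DeviceAccessToken(ctx, da)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("access token acquired, expires:", tok.Expiry)
}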
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index cc7c98c25..90a2c3d6d 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -75,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. 
type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -143,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -166,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -211,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000..50593b6df --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
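A hedged sketch of how the PKCE helpers defined in pkce.go below combine with AuthCodeURL and Exchange, per the updated doc comments above (endpoint URLs, client credentials, and the callback code are placeholders):

package main

import (
    "context"
    "fmt"
    "log"

    "golang.org/x/oauth2"
)

func main() {
    conf := &oauth2.Config{
        ClientID:    "my-client-id",
        RedirectURL: "https://example.com/callback",
        Scopes:      []string{"openid"},
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://example.com/oauth/authorize",
            TokenURL: "https://example.com/oauth/token",
        },
    }

    // Generate one fresh verifier per authorization attempt.
    verifier := oauth2.GenerateVerifier()

    // Attach the S256 challenge when building the consent URL.
    url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
    fmt.Println("Visit:", url)

    // After the redirect, exchange the code together with the same verifier.
    code := "code-from-callback" // placeholder for the value returned on the redirect
    tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("expires:", tok.Expiry)
}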
+package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..30f632c57 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. 
+// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
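To make the weighted-semaphore semantics described above concrete, a small hedged sketch of bounding concurrency with this package (the limit and worker count are arbitrary):

package main

import (
    "context"
    "fmt"
    "sync"

    "golang.org/x/sync/semaphore"
)

func main() {
    // Allow at most 3 workers to run at the same time.
    sem := semaphore.NewWeighted(3)
    ctx := context.Background()
    var wg sync.WaitGroup

    for i := 0; i < 10; i++ {
        // Acquire blocks until a token is free or ctx is done.
        if err := sem.Acquire(ctx, 1); err != nil {
            break
        }
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            defer sem.Release(1)
            fmt.Println("worker", n)
        }(i)
    }
    wg.Wait()
}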
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020e..fdcaa974d 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8df..36bf8399f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821cf..42ff8c3c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e4112..dca436004 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c63985560..5cca668ac 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e25..d8cae6d15 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09e..28e39afdc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC 
= 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642a..cd66e92cb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d75..c1595eba7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0d..ee9456b0d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84f..8cfca81e1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d6..60b0deb3a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc72..f90aa7281 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e2..ba9e01503 100644 
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813ed..07cdfd6e9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4ec..2f1dd214a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994da..f40519d90 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbdd..0cc3ce496 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc2504..856d92d69 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf2467..8d467094c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e2..edc173244 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77e..445eba206 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362ef..adba01bca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4fd..014c4e9c7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca81..ccc97d74d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d39..ec2b64a95 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e3747..21a839e33 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c8..c11121ec3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e591..909b631fc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195a..e49bed16e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943bc..66017d2d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc51..47bab18dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399ff..dc0c955ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - 
Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -3399,7 +3403,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4187,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5139,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5552,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad19250..d4577a423 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git 
a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708cc..6395a031d 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20..0569f5dd4 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. 
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. 
+ <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. 
Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
- if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e3..87c33c798 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b..5b95c13d9 100644 --- 
a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func 
NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e42..0f95aa91d 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e..5ad3548bf 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e..4201b6b58 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d8067263..18ddda3a4 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312f..afd0ae84f 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a333..86a8caf06 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae653..2ae8ab9fa 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d9..6c0d72418 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. 
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 83774fbcb..d5dccb933 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc v4.24.4 // source: google/api/client.proto package annotations @@ -1033,6 +1033,18 @@ type MethodSettings struct { // total_poll_timeout: // seconds: 54000 # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } func (x *MethodSettings) Reset() { @@ -1081,6 +1093,13 @@ func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { return nil } +func (x *MethodSettings) GetAutoPopulatedFields() []string { + if x != nil { + return x.AutoPopulatedFields + } + return nil +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1452,69 +1471,73 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, - 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, - 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, - 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, - 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, - 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, - 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 
0x03, - 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, - 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, - 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, - 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, - 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, - 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, - 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, - 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, - 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x1a, 0x94, 
0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, + 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, + 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, + 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, + 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, + 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, + 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, + 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, + 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, + 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, + 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, + 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, + 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index dbe2e2d0c..6ce01ac9a 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/field_behavior.proto package annotations @@ -78,6 +78,19 @@ const ( // a non-empty value will be returned. The user will not be aware of what // non-empty value to expect. FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 ) // Enum value maps for FieldBehavior. 
@@ -91,6 +104,7 @@ var ( 5: "IMMUTABLE", 6: "UNORDERED_LIST", 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -101,6 +115,7 @@ var ( "IMMUTABLE": 5, "UNORDERED_LIST": 6, "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, } ) @@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 000000000..d02e6bbc8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,295 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters, and zero-padded partial and + // empty octets. For example, the value `2001:DB8::` would be normalized to + // `2001:0db8:0:0`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. +var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. 
+ Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. + // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, + 0x10, 0x04, 0x3a, 
0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: 
file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248c..ab0fbb79b 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d0..52d530d7a 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f445..d79560a2e 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. 
+ logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 57% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index a4411c22b..b5e30cff0 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,87 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. 
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. 
func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. 
-func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err @@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. 
If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -364,6 +289,20 @@ type acBalancerWrapper struct { producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } // NewStream begins a streaming RPC on the addrConn. If the addrConn is not diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 595480112..856c75dd4 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.32.0 +// protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -430,7 +430,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ff7fea102..f0b7f3200 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( @@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), @@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. 
disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. 
by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) { type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. 
- cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. 
- cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + cc.mu.Unlock() - go func() { - cc.addTraceEvent("entering idle mode") - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() - return nil + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -649,66 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. 
-type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -804,6 +699,12 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -818,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -866,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -888,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. 
-func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -904,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. In both cases, we will clear the balancer attributes by @@ -926,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -941,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -962,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1168,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1210,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1247,40 +1140,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. 
+ cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1301,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1339,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1843,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1887,6 +1772,8 @@ func parseTarget(target string) (resolver.Target, error) { return resolver.Target{URL: *u}, nil } +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. func encodeAuthority(authority string) string { const upperhex = "0123456789ABCDEF" @@ -1975,58 +1862,17 @@ func (cc *ClientConn) determineAuthority() error { } endpoint := cc.parsedTarget.Endpoint() - target := cc.target - switch { - case authorityFromDialOption != "": + if authorityFromDialOption != "" { cc.authority = authorityFromDialOption - case authorityFromCreds != "": + } else if authorityFromCreds != "" { cc.authority = authorityFromCreds - case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): - // TODO: remove when the unix resolver implements optional interface to - // return channel authority. - cc.authority = "localhost" - case strings.HasPrefix(endpoint, ":"): + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { cc.authority = "localhost" + endpoint - default: - // TODO: Define an optional interface on the resolver builder to return - // the channel authority given the user's dial target. 
For resolvers - // which don't implement this interface, we will use the endpoint from - // "scheme://authority/endpoint" as the default authority. - // Escape the endpoint to handle use cases where the endpoint - // might not be a valid authority by default. - // For example an endpoint which has multiple paths like - // 'a/b/c', which is not a valid authority by default. + } else { cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182..08476ad1f 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21..5dafd34ed 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. 
if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: 
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 1fd0d5c12..ba2426180 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. 
func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -637,13 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -704,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 69d5580b6..5ebf88d71 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. 
} var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 0ee3d3bae..66d5cdf03 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -23,8 +23,9 @@ package proto import ( "fmt" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) // Name is the name registered for the proto compressor. @@ -38,21 +39,34 @@ func init() { type codec struct{} func (codec) Marshal(v any) ([]byte, error) { - vv, ok := v.(proto.Message) - if !ok { + vv := messageV2Of(v) + if vv == nil { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v any) error { - vv, ok := v.(proto.Message) - if !ok { + vv := messageV2Of(v) + if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } + return proto.Unmarshal(data, vv) } +func messageV2Of(v any) proto.Message { + switch v := v.(type) { + case protoadapt.MessageV1: + return protoadapt.MessageV2Of(v) + case protoadapt.MessageV2: + return v + } + + return nil +} + func (codec) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 24299efd6..5bf880d41 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.32.0 +// protoc v4.25.2 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d5..4c46c098d 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc v4.25.2 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -44,8 +44,15 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. 
// The server will immediately send back a message indicating the current @@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da..fed1c011a 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0f31274a3..e8456a77c 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -25,11 +25,12 @@ import ( "sync/atomic" "time" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) type callIDGenerator struct { @@ -88,7 +89,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // in TruncatingMethodLogger as possible. 
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() - timestamp, _ := ptypes.TimestampProto(time.Now()) + timestamp := timestamppb.Now() m.Timestamp = timestamp m.CallId = ml.callID m.SequenceIdWithinCall = ml.idWithinCallGen.next() @@ -178,7 +179,7 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { Authority: c.Authority, } if c.Timeout > 0 { - clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + clientHeader.Timeout = durationpb.New(c.Timeout) } ret := &binlogpb.GrpcLogEntry{ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 264de387c..9ea598b14 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/protobuf/proto" ) var ( diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df4..11f91668a 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. 
func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 5395e7752..fc094f344 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -58,6 +59,12 @@ func TurnOn() { } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. func IsOn() bool { return atomic.LoadInt32(&curState) == 1 diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddfb..685a3cb41 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,9 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c..29f234acb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
- XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000..7f7044e17 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index aa97273e7..0126d6b51 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -1,3 +1,8 @@ +//go:build !go1.21 + +// TODO: when this file is deleted (after Go 1.20 support is dropped), delete +// all of grpcrand and call the rand package directly. + /* * * Copyright 2018 gRPC authors. 
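The new internal/experimental.go file above uses gRPC's usual trick for exposing functionality across package boundaries without import cycles: the internal package declares an untyped `any` variable, the grpc package assigns the real function to it in init() (as the dialoptions.go hunk does with `internal.WithRecvBufferPool = withRecvBufferPool`), and callers recover the typed function with a type assertion. Below is a minimal, self-contained sketch of that wiring, collapsed into one package for brevity; the names WithRecvBufferPoolHook and withRecvBufferPool here are stand-ins, not code from this patch.

package main

import "fmt"

// "internal"-style declaration: an untyped hook that can be set by another
// package without creating an import cycle. The comment documents the
// intended concrete type, mirroring the style used in experimental.go.
var WithRecvBufferPoolHook any // func(pool string) string

// "grpc"-style implementation, registered at init time.
func withRecvBufferPool(pool string) string { return "dial option using pool " + pool }

func init() { WithRecvBufferPoolHook = withRecvBufferPool }

// "experimental"-style caller: recovers the typed function with an assertion.
func main() {
	f := WithRecvBufferPoolHook.(func(string) string)
	fmt.Println(f("shared"))
}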
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go new file mode 100644 index 000000000..c37299af1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go @@ -0,0 +1,73 @@ +//go:build go1.21 + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import "math/rand" + +// This implementation will be used for Go version 1.21 or newer. +// For older versions, the original implementation with mutex will be used. + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + return rand.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + return rand.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + return rand.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + return rand.Int31n(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + return rand.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + return rand.Uint64() +} + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + return rand.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + return rand.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + rand.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917dbe..f7f40a16a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. 
func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - cs.closedMu.Lock() - defer cs.closedMu.Unlock() - - if cs.closed { - return false - } - cs.callbacks.Put(f) - return true + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-cs.callbacks.Get(): - if !ok { - return - } + case cb := <-cs.callbacks.Get(): cs.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing cs.done. - cs.closedMu.Lock() - cs.closed = true - backlog = cs.fetchPendingCallbacks() + // Close the buffer to prevent new callbacks from being added. cs.callbacks.Close() - cs.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} -func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-cs.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - cs.callbacks.Load() - default: - return backlog - } + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 6c272476e..fe49cb74c 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -26,8 +26,6 @@ import ( "sync" "sync/atomic" "time" - - "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { // and exit from idle mode. type Enforcer interface { ExitIdleMode() error - EnterIdleMode() error -} - -// Manager defines the functionality required to track RPC activity on a -// channel. -type Manager interface { - OnCallBegin() error - OnCallEnd() - Close() + EnterIdleMode() } -type noopManager struct{} - -func (noopManager) OnCallBegin() error { return nil } -func (noopManager) OnCallEnd() {} -func (noopManager) Close() {} - -// manager implements the Manager interface. It uses atomic operations to -// synchronize access to shared state and a mutex to guarantee mutual exclusion -// in a critical section. -type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. 
- logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } } -// NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return } - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m + m.timer = timeAfterFunc(d, m.handleIdleTimeout) } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { +func (m *Manager) resetIdleTimer(d time.Duration) { m.idleMu.Lock() defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. 
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := m.enforcer.EnterIdleMode(); err != nil { - m.logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() m.actuallyIdle = true return true } +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + // OnCallBegin is invoked at the start of every RPC. 
-func (m *manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() error { if m.isClosed() { return nil } @@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error { // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.exitIdleMode(); err != nil { + if err := m.ExitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. atomic.AddInt32(&m.activeCallsCount, -1) @@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error { return nil } -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (m *manager) exitIdleMode() error { +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() - if !m.actuallyIdle { - // This can happen in two scenarios: + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. // - // Either way, nothing to do here. + // In any case, there is nothing to do here. return nil } if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) + return fmt.Errorf("failed to exit idle mode: %w", err) } // Undo the idle entry process. This also respects any new RPC attempts. @@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error { m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + m.resetIdleTimerLocked(m.timeout) return nil } // OnCallEnd is invoked at the end of every RPC. -func (m *manager) OnCallEnd() { +func (m *Manager) OnCallEnd() { if m.isClosed() { return } @@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() { atomic.AddInt32(&m.activeCallsCount, -1) } -func (m *manager) isClosed() bool { +func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } -func (m *manager) Close() { +func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) m.idleMu.Lock() - m.timer.Stop() - m.timer = nil + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c8a8c76d6..6c7ea6a53 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -57,7 +57,7 @@ var ( // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. 
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. @@ -68,11 +68,11 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. CanonicalString any // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. - DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -175,6 +175,27 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
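The idle.Manager rewrite above replaces the old Manager interface with a concrete type: NewManager(enforcer, timeout) returns a *Manager that starts in idle mode, and the ClientConn brackets every RPC with OnCallBegin/OnCallEnd while implementing the Enforcer interface (ExitIdleMode() error, EnterIdleMode()). The sketch below only illustrates that call sequence; it uses a toy Enforcer and would compile only from within the grpc module, since internal/ packages cannot be imported elsewhere.

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle"
)

// toyEnforcer stands in for the ClientConn, which is the real idle.Enforcer.
type toyEnforcer struct{}

func (toyEnforcer) ExitIdleMode() error { fmt.Println("exiting idle mode"); return nil }
func (toyEnforcer) EnterIdleMode()      { fmt.Println("entering idle mode") }

func main() {
	// Per the new NewManager doc comment, the manager begins in idle mode;
	// the first call (or Connect) must exit it via the enforcer.
	m := idle.NewManager(toyEnforcer{}, 30*time.Minute)
	defer m.Close()

	// Every RPC is bracketed by OnCallBegin/OnCallEnd. OnCallBegin exits idle
	// mode through the enforcer when the channel is idle or becoming idle.
	if err := m.OnCallBegin(); err != nil {
		fmt.Println("RPC blocked:", err)
		return
	}
	// ... issue the RPC ...
	m.OnCallEnd()
}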
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 703319137..52cfab1b9 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -24,7 +24,6 @@ import ( "encoding/json" "fmt" - "github.com/golang/protobuf/jsonpb" protov1 "github.com/golang/protobuf/proto" "google.golang.org/protobuf/encoding/protojson" protov2 "google.golang.org/protobuf/proto" @@ -38,15 +37,15 @@ const jsonIndent = " " func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: - mm := jsonpb.Marshaler{Indent: jsonIndent} - ret, err := mm.MarshalToString(ee) + mm := protojson.MarshalOptions{Indent: jsonIndent} + ret, err := mm.Marshal(protov1.MessageV2(ee)) if err != nil { // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 // messages are not imported, and this will fail because the message // is not found. return fmt.Sprintf("%+v", ee) } - return ret + return string(ret) case protov2.Message: mm := protojson.MarshalOptions{ Multiline: true, diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36..b66dcb213 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. 
- minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -178,7 +151,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. 
- return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000..c7fc557d0 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overidden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. 
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 160911687..27cd81af9 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -61,6 +61,10 @@ func (b *builder) Scheme() string { return b.scheme } +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 4cf85cad9..c7dbc8205 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -31,10 +31,11 @@ import ( "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" ) // Status represents an RPC status code, message, and details. It is immutable @@ -43,6 +44,34 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} @@ -102,14 +131,14 @@ func (s *Status) Err() error { // WithDetails returns a new status with the provided details messages appended to the status. // If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { +func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") } // s.Code() != OK implies that s.Proto() != nil. 
p := s.Proto() for _, detail := range details { - any, err := ptypes.MarshalAny(detail) + any, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } @@ -126,12 +155,12 @@ func (s *Status) Details() []any { } details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { + detail, err := any.UnmarshalNew() + if err != nil { details = append(details, err) continue } - details = append(details, detail.Message) + details = append(details, detail) } return details } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go new file mode 100644 index 000000000..4f347edd4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -0,0 +1,29 @@ +//go:build !unix && !windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 000000000..078137b7f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. 
+ KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 000000000..fd7d43a89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b330ccedc..83c382982 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -535,8 +535,8 @@ const minBatchSize = 1000 // size is too low to give stream goroutines a chance to fill it up. // // Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes and closes the underlying connection. 
Otherwise, the connection is -// left open to allow the I/O error to be encountered by the reader instead. +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. func (l *loopyWriter) run() (err error) { defer func() { if l.logger.V(logLevel) { @@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) { } if !isIOError(err) { l.framer.writer.Flush() - l.conn.Close() } l.cbuf.finish() }() diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa..bd39ff9a2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -35,7 +35,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -45,6 +44,7 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // NewServerHandlerTransport returns a ServerTransport handling gRPC from @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -220,18 +242,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
if isReservedHeader(k) { continue @@ -243,6 +267,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +312,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. - - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -367,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index badab8acf..eff879964 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -58,6 +59,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -176,7 +179,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +265,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -448,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. 
+ t.conn.Close() + } close(t.writerDone) }() return t, nil @@ -493,8 +502,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } @@ -566,7 +576,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -1321,10 +1331,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() @@ -1399,7 +1407,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1441,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1548,14 +1549,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. 
rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index c06db679d..3839c1ade 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -32,13 +32,13 @@ import ( "sync/atomic" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -333,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() - close(t.writerDone) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + select { + case <-t.readerDone: + case <-time.After(time.Second): + } + t.conn.Close() + } }() go t.keepalive() return t, nil @@ -342,7 +347,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +374,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +517,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. 
@@ -561,7 +567,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,19 +598,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -630,8 +623,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -656,16 +652,12 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } continue } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close(err) - return - } t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -980,7 +972,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } @@ -1053,12 +1050,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1240,10 +1240,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1309,10 +1305,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1349,6 +1341,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } + t.framer.writer.Flush() if retErr != nil { return false, retErr } @@ -1369,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } go func() { - timer := time.NewTimer(time.Minute) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { case <-t.drainEvent.Done(): @@ -1395,11 +1388,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1431,10 +1424,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1459,6 +1454,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. 
-func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 195814008..dc29d590e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -34,12 +34,9 @@ import ( "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -88,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. @@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 415961987..24fa10325 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. 
- conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 74a811fc0..d3796c256 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -28,6 +28,7 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" "time" @@ -37,6 +38,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +267,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -360,8 +363,12 @@ func (s *Stream) SendCompress() string { // ClientAdvertisedCompressors returns the compressor names advertised by the // client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values } // Done returns a channel which is closed when it receives the final status @@ -425,6 +432,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -437,6 +450,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -698,7 +717,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +736,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. 
Drain(debugData string) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12..1e9485fd6 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -153,14 +159,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -// -// This is intended for gRPC-internal use ONLY. Users should use -// FromOutgoingContext instead. 
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ff..a821ff9b2 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 236837f41..bf56faa76 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -37,7 +37,6 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool - idle bool blockingCh chan struct{} picker balancer.Picker statsHandlers []stats.Handler // to record blocking picker calls @@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 2e9cf66b4..5128f9364 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" @@ -65,19 +64,6 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - if !envconfig.PickFirstLBConfig { - // Prior to supporting loadbalancing configuration, the pick_first LB - // policy did not implement the balancer.ConfigParser interface. This - // meant that if a non-empty configuration was passed to it, the service - // config unmarshaling code would throw a warning log, but would - // continue using the pick_first LB policy. The code below ensures the - // same behavior is retained if the env var is not set. 
- if string(js) != "{}" { - logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) - } - return nil, nil - } - var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..14aa6f20a --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index e6b0f14cd..f2efa2a2c 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -26,7 +26,9 @@ import ( "google.golang.org/grpc/resolver" ) -// NewBuilderWithScheme creates a new test resolver builder with the given scheme. +// NewBuilderWithScheme creates a new manual resolver builder with the given +// scheme. Every instance of the manual resolver may only ever be used with a +// single grpc.ClientConn. Otherwise, bad things will happen. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, @@ -58,30 +60,34 @@ type Resolver struct { scheme string // Fields actually belong to the resolver. - mu sync.Mutex // Guards access to CC. - CC resolver.ClientConn - bootstrapState *resolver.State + // Guards access to below fields. + mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State } // InitialState adds initial state to the resolver so that UpdateState doesn't // need to be explicitly called after Dial. func (r *Resolver) InitialState(s resolver.State) { - r.bootstrapState = &s + r.lastSeenState = &s } // Build returns itself for Resolver, because it's both a builder and a resolver. 
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) r.mu.Lock() + defer r.mu.Unlock() r.CC = cc - r.mu.Unlock() - r.BuildCallback(target, cc, opts) - if r.bootstrapState != nil { - r.UpdateState(*r.bootstrapState) + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) } return r, nil } -// Scheme returns the test scheme. +// Scheme returns the manual resolver's scheme. func (r *Resolver) Scheme() string { return r.scheme } @@ -99,14 +105,22 @@ func (r *Resolver) Close() { // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.mu.Lock() - err := r.CC.UpdateState(s) - r.mu.Unlock() + defer r.mu.Unlock() + var err error + if r.CC == nil { + panic("cannot update state as grpc.Dial with resolver has not been called") + } + err = r.CC.UpdateState(s) + r.lastSeenState = &s r.UpdateStateCallback(err) } // ReportError calls CC.ReportError. func (r *Resolver) ReportError(err error) { r.mu.Lock() + defer r.mu.Unlock() + if r.CC == nil { + panic("cannot report error as grpc.Dial with resolver has not been called") + } r.CC.ReportError(err) - r.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887d..ada5b9bb7 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. 
Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e228..d72f21c1b 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -168,6 +168,9 @@ type BuildOptions struct { // field. In most cases though, it is not appropriate, and this field may // be ignored. Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string } // An Endpoint is one network endpoint, or server, which may have multiple @@ -240,11 +243,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +284,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -314,3 +317,13 @@ type Resolver interface { // Close closes the resolver. Close() } + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. 
+ OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d68330560..000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. 
-func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. 
-func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..f845ac958 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index b7723aa09..82493d237 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -189,6 +189,20 @@ type EmptyCallOption struct{} func (EmptyCallOption) before(*callInfo) error { return nil } func (EmptyCallOption) after(*callInfo, *csAttempt) {} +// StaticMethod returns a CallOption which specifies that a call is being made +// to a method that is static, which means the method is known at compile time +// and doesn't change at runtime. This can be used as a signal to stats plugins +// that this method is safe to include as a key to a measurement. 
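// Editorial sketch, not part of the vendored patch: one way a caller might use
// the StaticMethod CallOption defined just below. The package name, method
// path, and untyped req/reply arguments are illustrative assumptions; a
// generated client call accepts the same option.
package example

import (
	"context"

	"google.golang.org/grpc"
)

// callStatic performs a unary RPC and marks the method as static, so stats
// plugins may safely key measurements on the method name.
func callStatic(ctx context.Context, cc *grpc.ClientConn, req, reply any) error {
	return cc.Invoke(ctx, "/pkg.Service/Method", req, reply, grpc.StaticMethod())
}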
+func StaticMethod() CallOption { + return StaticMethodCallOption{} +} + +// StaticMethodCallOption is a CallOption that specifies that a call comes +// from a static method. +type StaticMethodCallOption struct { + EmptyCallOption +} + // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. func Header(md *metadata.MD) CallOption { @@ -640,14 +654,18 @@ func encode(c baseCodec, msg any) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -726,17 +744,19 @@ type payloadInfo struct { uncompressedBytes []byte } -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed. +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, +) (uncompressedBuf []byte, cancel func(), err error) { + pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.compressedLength = len(buf) + return nil, nil, err } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() + return nil, nil, st.Err() } var size int @@ -744,21 +764,35 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) + uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + size = len(uncompressedBuf) } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) } + } else { + uncompressedBuf = compressedBuf } - return buf, nil + + if payInfo != nil { + payInfo.compressedLength = len(compressedBuf) + payInfo.uncompressedBytes = uncompressedBuf + + cancel = func() {} + } else { + cancel = func() { + p.recvBufferPool.Put(&compressedBuf) + } + } + + return uncompressedBuf, cancel, nil } // Using compressor, decompress d, returning data and size. @@ -778,6 +812,9 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // size is used as an estimate to size the buffer, but we // will read more data if available. // +MinRead so ReadFrom will not reallocate if size is correct. + // + // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // we can also utilize the recv buffer pool here. buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return buf.Bytes(), int(bytesRead), err @@ -793,18 +830,15 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } + defer cancel() + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } - if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) - } return nil } @@ -954,6 +988,7 @@ const ( SupportPackageIsVersion5 = true SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true + SupportPackageIsVersion8 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eeae92fbe..a6a11704b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -33,8 +33,6 @@ import ( "sync/atomic" "time" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" @@ -70,9 +68,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) } + internal.ServerFromContext = serverFromContext internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) 
} @@ -81,6 +80,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -129,17 +129,19 @@ type Server struct { drain bool cv *sync.Cond // signaled when connections close for GracefulStop services map[string]*serviceInfo // service name -> service info - events trace.EventLog + events traceEventLog quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -170,6 +172,7 @@ type serverOptions struct { headerTableSize *uint32 numServerWorkers uint32 recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ @@ -567,6 +570,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + // RecvBufferPool returns a ServerOption that configures the server // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. @@ -578,11 +596,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -616,15 +636,14 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
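// Editorial sketch, not part of the vendored patch: constructing a server with
// the experimental WaitForHandlers option added above, so that Stop blocks
// until outstanding method handlers return. The listener address and package
// name are illustrative assumptions.
package example

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func newStrictStopServer() (*grpc.Server, net.Listener) {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Without this option, Stop returns once connections are closed even if
	// handlers are still running; GracefulStop already waits for handlers.
	srv := grpc.NewServer(grpc.WaitForHandlers(true))
	return srv, lis
}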
func NewServer(opt ...ServerOption) *Server { @@ -649,7 +668,7 @@ func NewServer(opt ...ServerOption) *Server { s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } if s.opts.numServerWorkers > 0 { @@ -806,6 +825,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -913,24 +944,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
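// Editorial sketch, not part of the vendored patch: the two steps described in
// the new Serve doc comment above for keeping OS-default TCP keepalives
// instead of Go's 15s override. The listener address and wrapper type name are
// illustrative assumptions.
package example

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// keepAliveListener enables SO_KEEPALIVE (with OS defaults) on accepted conns.
type keepAliveListener struct {
	net.Listener
}

func (l keepAliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		// Turn keepalives on without setting a period, so the OS defaults apply.
		if err := tc.SetKeepAlive(true); err != nil {
			c.Close()
			return nil, err
		}
	}
	return c, nil
}

func serveWithOSKeepalive(srv *grpc.Server) error {
	// A negative KeepAlive stops the Go runtime from forcing its own keepalive
	// time/interval, but also means keepalives are not enabled automatically.
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(context.Background(), "tcp", "localhost:50051")
	if err != nil {
		return err
	}
	return srv.Serve(keepAliveListener{lis})
}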
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -971,19 +999,32 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + defer s.handlersWG.Done() + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -995,14 +1036,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1046,31 +1080,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1133,7 +1143,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1152,7 +1162,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1194,7 +1204,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1208,7 +1218,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1240,7 +1250,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1262,7 +1272,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1333,7 +1342,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + + d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1344,11 +1354,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. t.IncrMsgRecv() } df := func(v any) error { + defer cancel() + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1362,7 +1374,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1370,7 +1382,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1395,7 +1407,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1403,7 +1415,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1418,7 +1430,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1445,8 +1457,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1460,8 +1472,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1479,7 +1491,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1521,7 +1533,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1535,10 +1547,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1574,7 +1586,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1616,7 +1628,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1694,7 +1706,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1712,53 +1724,87 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = newTraceContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := 
sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1767,19 +1813,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1834,62 +1880,72 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1898,22 +1954,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1927,11 +1975,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. 
+ return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // @@ -2033,7 +2120,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil + return stream.ClientAdvertisedCompressors(), nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. @@ -2073,7 +2160,7 @@ func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. -func validateSendCompressor(name, clientCompressors string) error { +func validateSendCompressor(name string, clientCompressors []string) error { if name == encoding.Identity { return nil } @@ -2082,7 +2169,7 @@ func validateSendCompressor(name, clientCompressors string) error { return fmt.Errorf("compressor not registered %q", name) } - for _, c := range strings.Split(clientCompressors, ",") { + for _, c := range clientCompressors { if c == name { return nil // found match } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b14b2fbea..814e99835 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -27,7 +27,6 @@ import ( "sync" "time" - "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" @@ -48,6 +47,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -184,7 +185,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // when the RPC completes. opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) 
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -429,7 +430,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) var trInfo *traceInfo if EnableTracing { trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), + tr: newTrace("grpc.Sent."+methodFamily(method), method), firstLine: firstLine{ client: true, }, @@ -438,7 +439,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) trInfo.firstLine.deadline = time.Until(deadline) } trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) + ctx = newTraceContext(ctx, trInfo.tr) } if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40..07f012576 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 9ded79321..10f4f798f 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -26,8 +26,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/trace" ) // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. @@ -44,9 +42,31 @@ func methodFamily(m string) string { return m } +// traceEventLog mirrors golang.org/x/net/trace.EventLog. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceEventLog interface { + Printf(format string, a ...any) + Errorf(format string, a ...any) + Finish() +} + +// traceLog mirrors golang.org/x/net/trace.Trace. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceLog interface { + LazyLog(x fmt.Stringer, sensitive bool) + LazyPrintf(format string, a ...any) + SetError() + SetRecycler(f func(any)) + SetTraceInfo(traceID, spanID uint64) + SetMaxEvents(m int) + Finish() +} + // traceInfo contains tracing information for an RPC. type traceInfo struct { - tr trace.Trace + tr traceLog firstLine firstLine } diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go new file mode 100644 index 000000000..1da3a2308 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_notrace.go @@ -0,0 +1,52 @@ +//go:build grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in +// turn enables binaries using gRPC-Go for dead code elimination, which can +// yield 10-15% improvements in binary size when tracing is not needed. + +import ( + "context" + "fmt" +) + +type notrace struct{} + +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} +func (notrace) LazyPrintf(format string, a ...any) {} +func (notrace) SetError() {} +func (notrace) SetRecycler(f func(any)) {} +func (notrace) SetTraceInfo(traceID, spanID uint64) {} +func (notrace) SetMaxEvents(m int) {} +func (notrace) Finish() {} + +func newTrace(family, title string) traceLog { + return notrace{} +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return ctx +} + +func newTraceEventLog(family, title string) traceEventLog { + return nil +} diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go new file mode 100644 index 000000000..88d6e8571 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_withtrace.go @@ -0,0 +1,39 @@ +//go:build !grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + + t "golang.org/x/net/trace" +) + +func newTrace(family, title string) traceLog { + return t.New(family, title) +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return t.NewContext(ctx, tr) +} + +func newTraceEventLog(family, title string) traceEventLog { + return t.NewEventLog(family, title) +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 724ad2102..46ad8113f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.58.3" +const Version = "1.62.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bbc9e2e3c..7a33c215b 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,14 +35,13 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. + PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. 
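# Editorial sketch, not part of the vendored patch: the grpcnotrace build tag
# introduced in trace_notrace.go / trace_withtrace.go above is opt-in and is
# enabled by consumers of this module at build time, for example:
go build -tags grpcnotrace ./...
# This keeps golang.org/x/net/trace out of the binary so dead-code elimination
# can reclaim the roughly 10-15% of binary size mentioned in the package note.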
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -77,15 +76,19 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -93,13 +96,15 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -107,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ @@ -116,94 +120,71 @@ for MOD_FILE in $(find . -name 'go.mod'); do done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. 
-not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +grpc_testing_not_regenerate +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. 
- if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 000000000..ea276d15a --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoadapt bridges the original and new proto APIs. +package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1Of(m MessageV2) MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2Of converts a v1 message to a v2 message. +// It returns nil if m is nil. +func MessageV2Of(m MessageV1) MessageV2 { + return protoimpl.X.ProtoMessageV2Of(m) +} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index cf9b6e6eb..d099238cd 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -3286,7 +3286,7 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 1aade3806..61ba21bca 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -423,7 +423,7 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. 
- // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 01152a096..fd6f7dc61 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1478,7 +1478,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index a2fe8f351..7500475a6 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,14 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - harshanarayana + - mengjiao-liu - pohly approvers: - dims + - pohly - thockin - - serathius emeritus_approvers: - brancz - justinsb - lavalamp - piosz + - serathius - tallclair diff --git a/vendor/k8s.io/klog/v2/contextual_slog.go b/vendor/k8s.io/klog/v2/contextual_slog.go new file mode 100644 index 000000000..d3b562521 --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual_slog.go @@ -0,0 +1,31 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "log/slog" + + "github.com/go-logr/logr" +) + +// SetSlogLogger reconfigures klog to log through the slog logger. The logger must not be nil. 
+func SetSlogLogger(logger *slog.Logger) { + SetLoggerWithOptions(logr.FromSlogHandler(logger.Handler()), ContextualLogger(true)) +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 72502db3a..026be9e3b 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -14,9 +14,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// Package klog contains the following functionality: +// +// - output routing as defined via command line flags ([InitFlags]) +// - log formatting as text, either with a single, unstructured string ([Info], [Infof], etc.) +// or as a structured log entry with message and key/value pairs ([InfoS], etc.) +// - management of a go-logr [Logger] ([SetLogger], [Background], [TODO]) +// - helper functions for logging values ([Format]) and managing the state of klog ([CaptureState], [State.Restore]) +// - wrappers for [logr] APIs for contextual logging where the wrappers can +// be turned into no-ops ([EnableContextualLogging], [NewContext], [FromContext], +// [LoggerWithValues], [LoggerWithName]); if the ability to turn off +// contextual logging is not needed, then go-logr can also be used directly +// - type aliases for go-logr types to simplify imports in code which uses both (e.g. [Logger]) +// - [k8s.io/klog/v2/textlogger]: a logger which uses the same formatting as klog log with +// simpler output routing; beware that it comes with its own command line flags +// and does not use the ones from klog +// - [k8s.io/klog/v2/ktesting]: per-test output in Go unit tests +// - [k8s.io/klog/v2/klogr]: a deprecated, standalone [logr.Logger] on top of the main klog package; +// use [Background] instead if klog output routing is needed, [k8s.io/klog/v2/textlogger] if not +// - [k8s.io/klog/v2/examples]: demos of this functionality +// - [k8s.io/klog/v2/test]: reusable tests for [logr.Logger] implementations // // Basic examples: // diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go index f7bf74030..c77d7baaf 100644 --- a/vendor/k8s.io/klog/v2/klogr_slog.go +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -25,7 +25,7 @@ import ( "strconv" "time" - "github.com/go-logr/logr/slogr" + "github.com/go-logr/logr" "k8s.io/klog/v2/internal/buffer" "k8s.io/klog/v2/internal/serialize" @@ -35,7 +35,7 @@ import ( func (l *klogger) Handle(ctx context.Context, record slog.Record) error { if logging.logger != nil { - if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + if slogSink, ok := logging.logger.GetSink().(logr.SlogSink); ok { // Let that logger do the work. return slogSink.Handle(ctx, record) } @@ -77,13 +77,13 @@ func slogOutput(file string, line int, now time.Time, err error, s severity.Seve buffer.PutBuffer(b) } -func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { +func (l *klogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { clone := *l clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) return &clone } -func (l *klogger) WithGroup(name string) slogr.SlogSink { +func (l *klogger) WithGroup(name string) logr.SlogSink { clone := *l if clone.groups != "" { clone.groups += "." 
+ name @@ -93,4 +93,4 @@ func (l *klogger) WithGroup(name string) slogr.SlogSink { return &clone } -var _ slogr.SlogSink = &klogger{} +var _ logr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/klog/v2/safeptr.go b/vendor/k8s.io/klog/v2/safeptr.go new file mode 100644 index 000000000..bbe24c2e8 --- /dev/null +++ b/vendor/k8s.io/klog/v2/safeptr.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +// SafePtr is a function that takes a pointer of any type (T) as an argument. +// If the provided pointer is not nil, it returns the same pointer. If it is nil, it returns nil instead. +// +// This function is particularly useful to prevent nil pointer dereferencing when: +// +// - The type implements interfaces that are called by the logger, such as `fmt.Stringer`. +// - And these interface implementations do not perform nil checks themselves. +func SafePtr[T any](p *T) any { + if p == nil { + return nil + } + return p +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7bd2d2247..f28988b43 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -32,6 +32,25 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 +# github.com/cloudevents/sdk-go/v2 v2.14.0 +## explicit; go 1.17 +github.com/cloudevents/sdk-go/v2 +github.com/cloudevents/sdk-go/v2/binding +github.com/cloudevents/sdk-go/v2/binding/format +github.com/cloudevents/sdk-go/v2/binding/spec +github.com/cloudevents/sdk-go/v2/client +github.com/cloudevents/sdk-go/v2/context +github.com/cloudevents/sdk-go/v2/event +github.com/cloudevents/sdk-go/v2/event/datacodec +github.com/cloudevents/sdk-go/v2/event/datacodec/json +github.com/cloudevents/sdk-go/v2/event/datacodec/text +github.com/cloudevents/sdk-go/v2/event/datacodec/xml +github.com/cloudevents/sdk-go/v2/protocol +github.com/cloudevents/sdk-go/v2/protocol/http +github.com/cloudevents/sdk-go/v2/types # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver @@ -45,6 +64,10 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/eclipse/paho.golang v0.11.0 +## explicit; go 1.15 +github.com/eclipse/paho.golang/packets +github.com/eclipse/paho.golang/paho # github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -69,7 +92,6 @@ github.com/fsnotify/fsnotify ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr -github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr @@ -104,11 +126,11 @@ github.com/gogo/protobuf/sortkeys github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 
-github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp # github.com/google/cel-go v0.17.7 ## explicit; go 1.18 @@ -152,7 +174,7 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -230,8 +252,8 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.30.0 -## explicit; go 1.18 +# github.com/onsi/gomega v1.31.1 +## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -242,7 +264,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533 +# github.com/openshift/build-machinery-go v0.0.0-20231128094528-1e9b1b0595c8 ## explicit; go 1.13 github.com/openshift/build-machinery-go github.com/openshift/build-machinery-go/make @@ -403,7 +425,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.17.0 +# golang.org/x/crypto v0.19.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -420,7 +442,7 @@ golang.org/x/crypto/scrypt ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.19.0 +# golang.org/x/net v0.21.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -433,21 +455,22 @@ golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.12.0 +# golang.org/x/oauth2 v0.16.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.5.0 +# golang.org/x/sync v0.6.0 ## explicit; go 1.18 +golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.17.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.15.0 +# golang.org/x/term v0.17.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -484,7 +507,7 @@ golang.org/x/text/width # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine/internal google.golang.org/appengine/internal/base @@ -493,20 +516,20 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 +# google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 ## explicit; go 1.19 google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e +# google.golang.org/genproto/googleapis/api 
v0.0.0-20240123012728-ef4313101c80 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.58.3 +# google.golang.org/grpc v1.62.1 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -544,6 +567,7 @@ google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -555,6 +579,7 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats @@ -586,6 +611,7 @@ google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/protoadapt google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry @@ -628,7 +654,7 @@ helm.sh/helm/v3/pkg/ignore helm.sh/helm/v3/pkg/release helm.sh/helm/v3/pkg/releaseutil helm.sh/helm/v3/pkg/time -# k8s.io/api v0.29.1 +# k8s.io/api v0.29.2 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -695,7 +721,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.29.1 +# k8s.io/apimachinery v0.29.2 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -903,7 +929,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.29.1 +# k8s.io/client-go v0.29.2 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1235,8 +1261,8 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/klog/v2 v2.110.1 -## explicit; go 1.13 +# k8s.io/klog/v2 v2.120.1 +## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock @@ -1271,7 +1297,7 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/utils v0.0.0-20240102154912-e7106e64919e +# k8s.io/utils v0.0.0-20240310230437-4693a0247e57 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock 
@@ -1342,12 +1368,29 @@ open-cluster-management.io/api/cluster/v1beta2 open-cluster-management.io/api/utils/work/v1/workapplier open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.13.0 +# open-cluster-management.io/sdk-go v0.13.1-0.20240321032811-7dbdd1b5c63d ## explicit; go 1.21 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1 open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder +open-cluster-management.io/sdk-go/pkg/cloudevents/generic +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1 +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types +open-cluster-management.io/sdk-go/pkg/cloudevents/work +open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client +open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler +open-cluster-management.io/sdk-go/pkg/cloudevents/work/common +open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler +open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils +open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher open-cluster-management.io/sdk-go/pkg/patcher # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 ## explicit; go 1.20 diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go new file mode 100644 index 000000000..fc451156f --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go @@ -0,0 +1,310 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/klog/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// CloudEventAgentClient is a client for an agent to resync/send/receive its resources with cloud events. +// +// An agent is a component that handles the deployment of requested resources on the managed cluster and status report +// to the source. +type CloudEventAgentClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + agentID string + clusterName string +} + +// NewCloudEventAgentClient returns an instance for CloudEventAgentClient. The following arguments are required to +// create a client. +// - agentOptions provides the clusterName and agentID and the cloudevents clients that are based on different event +// protocols for sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of an agent. +// - statusHashGetter calculates the resource status hash. 
+// - codecs is list of codecs for encoding/decoding a resource objet/cloudevent to/from a cloudevent/resource objet. +func NewCloudEventAgentClient[T ResourceObject]( + ctx context.Context, + agentOptions *options.CloudEventsAgentOptions, + lister Lister[T], + statusHashGetter StatusHashGetter[T], + codecs ...Codec[T], +) (*CloudEventAgentClient[T], error) { + baseClient := &baseClient{ + cloudEventsOptions: agentOptions.CloudEventsOptions, + cloudEventsRateLimiter: NewRateLimiter(agentOptions.EventRateLimit), + reconnectedChan: make(chan struct{}), + } + + if err := baseClient.connect(ctx); err != nil { + return nil, err + } + + evtCodes := make(map[types.CloudEventsDataType]Codec[T]) + for _, codec := range codecs { + evtCodes[codec.EventDataType()] = codec + } + + return &CloudEventAgentClient[T]{ + baseClient: baseClient, + lister: lister, + codecs: evtCodes, + statusHashGetter: statusHashGetter, + agentID: agentOptions.AgentID, + clusterName: agentOptions.ClusterName, + }, nil +} + +// ReconnectedChan returns a chan which indicates the source/agent client is reconnected. +// The source/agent client callers should consider sending a resync request when receiving this signal. +func (c *CloudEventAgentClient[T]) ReconnectedChan() <-chan struct{} { + return c.reconnectedChan +} + +// Resync the resources spec by sending a spec resync request from the current to the given source. +func (c *CloudEventAgentClient[T]) Resync(ctx context.Context, source string) error { + // list the resource objects that are maintained by the current agent with the given source + objs, err := c.lister.List(types.ListOptions{Source: source, ClusterName: c.clusterName}) + if err != nil { + return err + } + + resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))} + for i, obj := range objs { + resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + return err + } + + resources.Versions[i] = payload.ResourceVersion{ + ResourceID: string(obj.GetUID()), + ResourceVersion: resourceVersion, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.agentID, eventType). + WithOriginalSource(source). + WithClusterName(c.clusterName). + NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, resources); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource status from an agent to a source. +func (c *CloudEventAgentClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find a codec for event %s", eventType.CloudEventsDataType) + } + + if eventType.SubResource != types.SubResourceStatus { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + evt, err := codec.Encode(c.agentID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the source status resync request or source resource spec request. +// For status resync request, agent publish the current resources status back as response. 
+// For resource spec request, agent receives resource spec and handles the spec with resource handlers. +func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) + }) +} + +func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported resync event type %s, ignore", eventType) + return + } + + if err := c.respondResyncStatusRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync manifestsstatus, %v", err) + } + + return + } + + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode spec, %v", err) + return + } + + action, err := c.specAction(evt.Source(), obj) + if err != nil { + klog.Errorf("failed to generate spec action %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, obj); err != nil { + klog.Errorf("failed to handle spec event %s, %v", evt, err) + } + } +} + +// Upon receiving the status resync event, the agent responds by sending resource status events to the broker as +// follows: +// - If the event payload is empty, the agent returns the status of all resources it maintains. +// - If the event payload is not empty, the agent retrieves the resource with the specified ID and compares the +// received resource status hash with the current resource status hash. If they are not equal, the agent sends the +// resource status message. 
+func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( + ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event) error { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: evt.Source()}) + if err != nil { + return err + } + + statusHashes, err := payload.DecodeStatusResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncResponseAction, + } + + if len(statusHashes.Hashes) == 0 { + // publish all resources status + for _, obj := range objs { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil + } + + for _, obj := range objs { + lastHash, ok := findStatusHash(string(obj.GetUID()), statusHashes.Hashes) + if !ok { + // ignore the resource that is not on the source, but exists on the agent, wait for the source deleting it + klog.Infof("The resource %s is not found from the source, ignore", obj.GetUID()) + continue + } + + currentHash, err := c.statusHashGetter(obj) + if err != nil { + continue + } + + if currentHash == lastHash { + // the status is not changed, do nothing + continue + } + + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventAgentClient[T]) specAction(source string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: source}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return types.Added, nil + } + + if !obj.GetDeletionTimestamp().IsZero() { + return types.Deleted, nil + } + + if obj.GetResourceVersion() == lastObj.GetResourceVersion() { + return evt, nil + } + + return types.Modified, nil +} + +func getObj[T ResourceObject](resourceID string, objs []T) (obj T, exists bool) { + for _, obj := range objs { + if string(obj.GetUID()) == resourceID { + return obj, true + } + } + + return obj, false +} + +func findStatusHash(id string, hashes []payload.ResourceStatusHash) (string, bool) { + for _, hash := range hashes { + if id == hash.ResourceID { + return hash.StatusHash, true + } + } + + return "", false +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go new file mode 100644 index 000000000..c9be382de --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go @@ -0,0 +1,223 @@ +package generic + +import ( + "context" + "fmt" + "sync" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" +) + +const ( + restartReceiverSignal = iota + stopReceiverSignal +) + +type receiveFn func(ctx context.Context, evt cloudevents.Event) + +type baseClient struct { + sync.RWMutex + cloudEventsOptions options.CloudEventsOptions + cloudEventsClient cloudevents.Client + cloudEventsRateLimiter flowcontrol.RateLimiter + receiverChan chan int + reconnectedChan chan struct{} +} + +func (c *baseClient) connect(ctx context.Context) error { + var err error + c.cloudEventsClient, err = c.cloudEventsOptions.Client(ctx) + if err != nil { + return err + } + + // 
start a go routine to handle cloudevents client connection errors + go func() { + var err error + + // the reconnect backoff will stop at [1,5) min interval. If we don't backoff for 10min, we reset the backoff. + delayFn := wait.Backoff{ + Duration: 5 * time.Second, + Cap: 1 * time.Minute, + Steps: 12, // now a required argument + Factor: 5.0, + Jitter: 1.0, + }.DelayWithReset(&clock.RealClock{}, 10*time.Minute) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient == nil { + klog.V(4).Infof("reconnecting the cloudevents client") + cloudEventsClient, err = c.cloudEventsOptions.Client(ctx) + // TODO enhance the cloudevents SKD to avoid wrapping the error type to distinguish the net connection + // errors + if err != nil { + // failed to reconnect, try agin + runtime.HandleError(fmt.Errorf("the cloudevents client reconnect failed, %v", err)) + <-wait.RealTimer(delayFn()).C() + continue + } + + // the cloudevents network connection is back, refresh the current cloudevents client and send the + // receiver restart signal + klog.V(4).Infof("the cloudevents client is reconnected") + c.resetClient(cloudEventsClient) + c.sendReceiverSignal(restartReceiverSignal) + c.sendReconnectedSignal() + } + + select { + case <-ctx.Done(): + if c.receiverChan != nil { + close(c.receiverChan) + } + return + case err, ok := <-c.cloudEventsOptions.ErrorChan(): + if !ok { + // error channel is closed, do nothing + return + } + + runtime.HandleError(fmt.Errorf("the cloudevents client is disconnected, %v", err)) + + // the cloudevents client network connection is closed, send the receiver stop signal, set the current + // client to nil and retry + c.sendReceiverSignal(stopReceiverSignal) + + cloudEventsClient = nil + c.resetClient(cloudEventsClient) + + <-wait.RealTimer(delayFn()).C() + } + } + }() + + return nil +} + +func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { + now := time.Now() + + if err := c.cloudEventsRateLimiter.Wait(ctx); err != nil { + return fmt.Errorf("client rate limiter Wait returned an error: %w", err) + } + + latency := time.Since(now) + if latency > longThrottleLatency { + klog.Warningf(fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s", + latency, evt)) + } + + sendingCtx, err := c.cloudEventsOptions.WithContext(ctx, evt.Context) + if err != nil { + return err + } + + klog.V(4).Infof("Sent event: %v\n%s", ctx, evt) + + // make sure the current client is the newest + c.RLock() + defer c.RUnlock() + + if c.cloudEventsClient == nil { + return fmt.Errorf("the cloudevents client is not ready") + } + + if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { + return fmt.Errorf("failed to send event %s, %v", evt, result) + } + + return nil +} + +func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { + c.Lock() + defer c.Unlock() + + // make sure there is only one subscription go routine starting for one client. 
+ if c.receiverChan != nil { + klog.Warningf("the subscription has already started") + return + } + + c.receiverChan = make(chan int) + + // start a go routine to handle cloudevents subscription + go func() { + receiverCtx, receiverCancel := context.WithCancel(context.TODO()) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient != nil { + go func() { + if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { + receive(receiverCtx, evt) + }); err != nil { + runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err)) + } + }() + } + + select { + case <-ctx.Done(): + receiverCancel() + return + case signal, ok := <-c.receiverChan: + if !ok { + // receiver channel is closed, stop the receiver + receiverCancel() + return + } + + switch signal { + case restartReceiverSignal: + klog.V(4).Infof("restart the cloudevents receiver") + // make sure the current client is the newest + c.RLock() + cloudEventsClient = c.cloudEventsClient + c.RUnlock() + + // rebuild the receiver context + receiverCtx, receiverCancel = context.WithCancel(context.TODO()) + case stopReceiverSignal: + klog.V(4).Infof("stop the cloudevents receiver") + receiverCancel() + cloudEventsClient = nil + default: + runtime.HandleError(fmt.Errorf("unknown receiver signal %d", signal)) + } + } + } + }() +} + +func (c *baseClient) resetClient(client cloudevents.Client) { + c.Lock() + defer c.Unlock() + + c.cloudEventsClient = client +} + +func (c *baseClient) sendReceiverSignal(signal int) { + c.RLock() + defer c.RUnlock() + + if c.receiverChan != nil { + c.receiverChan <- signal + } +} + +func (c *baseClient) sendReconnectedSignal() { + c.RLock() + defer c.RUnlock() + c.reconnectedChan <- struct{}{} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go new file mode 100644 index 000000000..06cb8fd45 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go @@ -0,0 +1,74 @@ +package generic + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// ResourceHandler handles the received resource object. +type ResourceHandler[T ResourceObject] func(action types.ResourceAction, obj T) error + +// StatusHashGetter gets the status hash of one resource object. +type StatusHashGetter[T ResourceObject] func(obj T) (string, error) + +type ResourceObject interface { + // GetUID returns the resource ID of this object. The resource ID represents the unique identifier for this object. + // The source should ensure its uniqueness and consistency. + GetUID() kubetypes.UID + + // GetResourceVersion returns the resource version of this object. The resource version is a required int64 sequence + // number property that must be incremented by the source whenever this resource changes. + // The source should guarantee its incremental nature. + GetResourceVersion() string + + // GetDeletionTimestamp returns the deletion timestamp of this object. The deletiontimestamp is an optional + // timestamp property representing the resource is deleting from the source, the agent needs to clean up the + // resource from its cluster. 
+ GetDeletionTimestamp() *metav1.Time +} + +type Lister[T ResourceObject] interface { + // List returns the list of resource objects that are maintained by source/agent. + List(options types.ListOptions) ([]T, error) +} + +type Codec[T ResourceObject] interface { + // EventDataType indicates which type of the event data the codec is used for. + EventDataType() types.CloudEventsDataType + + // Encode a resource object to cloudevents event. + // Each event should have the following extensions: `resourceid`, `resourceversion` and `clustername`. + // The source set the `deletiontimestamp` extension to indicate one resource object is deleting from a source. + // The agent set the `originalsource` extension to indicate one resource belonged to which source. + Encode(source string, eventType types.CloudEventsType, obj T) (*cloudevents.Event, error) + + // Decode a cloudevents event to a resource object. + Decode(event *cloudevents.Event) (T, error) +} + +type CloudEventsClient[T ResourceObject] interface { + // Resync the resources of one source/agent by sending resync request. + // The second parameter is used to specify cluster name/source ID for a source/agent. + // - A source sends the resource status resync request to a cluster with the given cluster name. + // If setting this parameter to `types.ClusterAll`, the source will broadcast the resync request to all clusters. + // - An agent sends the resources spec resync request to a source with the given source ID. + // If setting this parameter to `types.SourceAll`, the agent will broadcast the resync request to all sources. + Resync(context.Context, string) error + + // Publish the resources spec/status event to the broker. + Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error + + // Subscribe the resources status/spec event to the broker to receive the resources status/spec and use + // ResourceHandler to handle them. + Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) + + // ReconnectedChan returns a chan which indicates the source/agent client is reconnected. + // The source/agent client callers should consider sending a resync request when receiving this signal. 
+ ReconnectedChan() <-chan struct{} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go new file mode 100644 index 000000000..293f4b178 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go @@ -0,0 +1,57 @@ +package grpc + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type grpcAgentOptions struct { + GRPCOptions + errorChan chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically + clusterName string +} + +func NewAgentOptions(grpcOptions *GRPCOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: &grpcAgentOptions{ + GRPCOptions: *grpcOptions, + errorChan: make(chan error), + clusterName: clusterName, + }, + AgentID: agentID, + ClusterName: clusterName, + } +} + +func (o *grpcAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + // grpc agent client doesn't need to update topic in the context + return ctx, nil +} + +func (o *grpcAgentOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + func(err error) { + o.errorChan <- err + }, + protocol.WithSubscribeOption(&protocol.SubscribeOption{ + // TODO: Update this code to determine the subscription source for the agent client. + // Currently, the grpc agent client is not utilized, and the 'Source' field serves + // as a placeholder with all the sources. + Source: types.SourceAll, + }), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *grpcAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go new file mode 100644 index 000000000..403673c09 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go @@ -0,0 +1,155 @@ +package grpc + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "os" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "gopkg.in/yaml.v2" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" +) + +// GRPCOptions holds the options that are used to build gRPC client. +type GRPCOptions struct { + URL string + CAFile string + ClientCertFile string + ClientKeyFile string +} + +// GRPCConfig holds the information needed to build connect to gRPC server as a given user. +type GRPCConfig struct { + // URL is the address of the gRPC server (host:port). + URL string `json:"url" yaml:"url"` + // CAFile is the file path to a cert file for the gRPC server certificate authority. + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + // ClientCertFile is the file path to a client cert file for TLS. 
+ ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` + // ClientKeyFile is the file path to a client key file for TLS. + ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` +} + +// BuildGRPCOptionsFromFlags builds configs from a config filepath. +func BuildGRPCOptionsFromFlags(configPath string) (*GRPCOptions, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + config := &GRPCConfig{} + if err := yaml.Unmarshal(configData, config); err != nil { + return nil, err + } + + if config.URL == "" { + return nil, fmt.Errorf("url is required") + } + + if (config.ClientCertFile == "" && config.ClientKeyFile != "") || + (config.ClientCertFile != "" && config.ClientKeyFile == "") { + return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") + } + if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") + } + + return &GRPCOptions{ + URL: config.URL, + CAFile: config.CAFile, + ClientCertFile: config.ClientCertFile, + ClientKeyFile: config.ClientKeyFile, + }, nil +} + +func NewGRPCOptions() *GRPCOptions { + return &GRPCOptions{} +} + +func (o *GRPCOptions) GetGRPCClientConn() (*grpc.ClientConn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{clientCerts}, + RootCAs: certPool, + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS13, + } + + conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) + } + + return conn, nil + } + + conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) + } + + return conn, nil +} + +func (o *GRPCOptions) GetCloudEventsClient(ctx context.Context, errorHandler func(error), clientOpts ...protocol.Option) (cloudevents.Client, error) { + conn, err := o.GetGRPCClientConn() + if err != nil { + return nil, err + } + + // Periodically (every 100ms) check the connection status and reconnect if necessary. + go func() { + ticker := time.NewTicker(100 * time.Millisecond) + for { + select { + case <-ctx.Done(): + ticker.Stop() + conn.Close() + case <-ticker.C: + if conn.GetState() == connectivity.TransientFailure { + errorHandler(fmt.Errorf("grpc connection is disconnected")) + ticker.Stop() + conn.Close() + return // exit the goroutine as the error handler function will handle the reconnection. + } + } + } + }() + + opts := []protocol.Option{} + opts = append(opts, clientOpts...) + p, err := protocol.NewProtocol(conn, opts...) 
+ if err != nil { + return nil, err + } + + return cloudevents.NewClient(p) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md new file mode 100644 index 000000000..0bc2eeba6 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md @@ -0,0 +1,30 @@ +# CloudEvent gRPC Protobuf Definitions + +## Overview + +This repository includes the protobuf message and RPC method definitions for CloudEvent gRPC service, along with the corresponding Go code generated from these definitions. + +## Getting Started + +### Prerequisites + +Make sure you have the following tools installed: + +- [Protocol Compiler (protoc)](https://grpc.io/docs/protoc-installation/) +- Go plugins for the protocol compiler: + +```bash +$ go install google.golang.org/protobuf/cmd/protoc-gen-go +$ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +``` + +### Updating CloudEvent gRPC Service + +1. Modify the `*.proto` files to reflect your desired changes. +2. Run the following command to update the generated code: + + ```bash + go generate + ``` + + This step is crucial to ensure that your changes are applied to both the gRPC server and client stub. diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go new file mode 100644 index 000000000..5cc30a3cb --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go @@ -0,0 +1,649 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.12.4 +// source: cloudevent.proto + +package v1 + +import ( + any1 "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// CloudEvent is copied from +// https://github.com/cloudevents/spec/blob/main/cloudevents/formats/protobuf-format.md. +type CloudEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique event identifier. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // URI of the event source. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Version of the spec in use. + SpecVersion string `protobuf:"bytes,3,opt,name=spec_version,json=specVersion,proto3" json:"spec_version,omitempty"` + // Event type identifier. 
+ Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + // Optional & Extension Attributes + Attributes map[string]*CloudEventAttributeValue `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CloudEvent Data (Bytes, Text, or Proto) + // + // Types that are assignable to Data: + // + // *CloudEvent_BinaryData + // *CloudEvent_TextData + // *CloudEvent_ProtoData + Data isCloudEvent_Data `protobuf_oneof:"data"` +} + +func (x *CloudEvent) Reset() { + *x = CloudEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEvent) ProtoMessage() {} + +func (x *CloudEvent) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEvent.ProtoReflect.Descriptor instead. +func (*CloudEvent) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{0} +} + +func (x *CloudEvent) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CloudEvent) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *CloudEvent) GetSpecVersion() string { + if x != nil { + return x.SpecVersion + } + return "" +} + +func (x *CloudEvent) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *CloudEvent) GetAttributes() map[string]*CloudEventAttributeValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (m *CloudEvent) GetData() isCloudEvent_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *CloudEvent) GetBinaryData() []byte { + if x, ok := x.GetData().(*CloudEvent_BinaryData); ok { + return x.BinaryData + } + return nil +} + +func (x *CloudEvent) GetTextData() string { + if x, ok := x.GetData().(*CloudEvent_TextData); ok { + return x.TextData + } + return "" +} + +func (x *CloudEvent) GetProtoData() *any1.Any { + if x, ok := x.GetData().(*CloudEvent_ProtoData); ok { + return x.ProtoData + } + return nil +} + +type isCloudEvent_Data interface { + isCloudEvent_Data() +} + +type CloudEvent_BinaryData struct { + // If the event is binary data then the datacontenttype attribute + // should be set to an appropriate media-type. + BinaryData []byte `protobuf:"bytes,6,opt,name=binary_data,json=binaryData,proto3,oneof"` +} + +type CloudEvent_TextData struct { + // If the event is string data then the datacontenttype attribute + // should be set to an appropriate media-type such as application/json. + TextData string `protobuf:"bytes,7,opt,name=text_data,json=textData,proto3,oneof"` +} + +type CloudEvent_ProtoData struct { + // If the event is a protobuf then it must be encoded using this Any + // type. The datacontenttype attribute should be set to + // application/protobuf and the dataschema attribute set to the message + // type. 
+ ProtoData *any1.Any `protobuf:"bytes,8,opt,name=proto_data,json=protoData,proto3,oneof"` +} + +func (*CloudEvent_BinaryData) isCloudEvent_Data() {} + +func (*CloudEvent_TextData) isCloudEvent_Data() {} + +func (*CloudEvent_ProtoData) isCloudEvent_Data() {} + +// CloudEventAttribute enables extensions to use any of the seven allowed +// data types as the value of an envelope key. +type CloudEventAttributeValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value can be any one of these types. + // + // Types that are assignable to Attr: + // + // *CloudEventAttributeValue_CeBoolean + // *CloudEventAttributeValue_CeInteger + // *CloudEventAttributeValue_CeString + // *CloudEventAttributeValue_CeBytes + // *CloudEventAttributeValue_CeUri + // *CloudEventAttributeValue_CeUriRef + // *CloudEventAttributeValue_CeTimestamp + Attr isCloudEventAttributeValue_Attr `protobuf_oneof:"attr"` +} + +func (x *CloudEventAttributeValue) Reset() { + *x = CloudEventAttributeValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventAttributeValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventAttributeValue) ProtoMessage() {} + +func (x *CloudEventAttributeValue) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventAttributeValue.ProtoReflect.Descriptor instead. +func (*CloudEventAttributeValue) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{1} +} + +func (m *CloudEventAttributeValue) GetAttr() isCloudEventAttributeValue_Attr { + if m != nil { + return m.Attr + } + return nil +} + +func (x *CloudEventAttributeValue) GetCeBoolean() bool { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeBoolean); ok { + return x.CeBoolean + } + return false +} + +func (x *CloudEventAttributeValue) GetCeInteger() int32 { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeInteger); ok { + return x.CeInteger + } + return 0 +} + +func (x *CloudEventAttributeValue) GetCeString() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeString); ok { + return x.CeString + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeBytes() []byte { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeBytes); ok { + return x.CeBytes + } + return nil +} + +func (x *CloudEventAttributeValue) GetCeUri() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeUri); ok { + return x.CeUri + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeUriRef() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeUriRef); ok { + return x.CeUriRef + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeTimestamp() *timestamp.Timestamp { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeTimestamp); ok { + return x.CeTimestamp + } + return nil +} + +type isCloudEventAttributeValue_Attr interface { + isCloudEventAttributeValue_Attr() +} + +type CloudEventAttributeValue_CeBoolean struct { + // Boolean value. 
+ CeBoolean bool `protobuf:"varint,1,opt,name=ce_boolean,json=ceBoolean,proto3,oneof"` +} + +type CloudEventAttributeValue_CeInteger struct { + // Integer value. + CeInteger int32 `protobuf:"varint,2,opt,name=ce_integer,json=ceInteger,proto3,oneof"` +} + +type CloudEventAttributeValue_CeString struct { + // String value. + CeString string `protobuf:"bytes,3,opt,name=ce_string,json=ceString,proto3,oneof"` +} + +type CloudEventAttributeValue_CeBytes struct { + // Byte string value. + CeBytes []byte `protobuf:"bytes,4,opt,name=ce_bytes,json=ceBytes,proto3,oneof"` +} + +type CloudEventAttributeValue_CeUri struct { + // URI value. + CeUri string `protobuf:"bytes,5,opt,name=ce_uri,json=ceUri,proto3,oneof"` +} + +type CloudEventAttributeValue_CeUriRef struct { + // URI reference value. + CeUriRef string `protobuf:"bytes,6,opt,name=ce_uri_ref,json=ceUriRef,proto3,oneof"` +} + +type CloudEventAttributeValue_CeTimestamp struct { + // Timestamp value. + CeTimestamp *timestamp.Timestamp `protobuf:"bytes,7,opt,name=ce_timestamp,json=ceTimestamp,proto3,oneof"` +} + +func (*CloudEventAttributeValue_CeBoolean) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeInteger) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeString) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeBytes) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeUri) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeUriRef) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeTimestamp) isCloudEventAttributeValue_Attr() {} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Define the CloudEvent to be published + Event *CloudEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{2} +} + +func (x *PublishRequest) GetEvent() *CloudEvent { + if x != nil { + return x.Event + } + return nil +} + +type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The original source of the respond CloudEvent(s). 
+ Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` +} + +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{3} +} + +func (x *SubscriptionRequest) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +var File_cloudevent_proto protoreflect.FileDescriptor + +var file_cloudevent_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, + 0x03, 0x0a, 0x0a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x70, 0x65, + 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x00, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, + 0x0a, 0x09, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x08, 0x74, 0x65, 0x78, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, + 0x0a, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x6a, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x9a, 0x02, 0x0a, 0x18, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x65, 0x5f, 0x62, 0x6f, 0x6f, 0x6c, + 0x65, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x63, 0x65, 0x42, + 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x67, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x63, 0x65, + 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x09, 0x63, 0x65, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x63, 0x65, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x08, 0x63, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x63, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x06, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x65, 0x55, 0x72, 0x69, 0x52, 0x65, 0x66, 0x12, 0x3f, 0x0a, 0x0c, + 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, + 0x52, 0x0b, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x06, 0x0a, + 0x04, 0x61, 0x74, 0x74, 0x72, 0x22, 0x45, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x2d, 0x0a, 0x13, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x32, 0xb3, 0x01, 0x0a, 0x11, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 
0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x46, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x21, 0x2e, 0x69, + 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x09, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x26, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, + 0x01, 0x42, 0x50, 0x5a, 0x4e, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x69, 0x6f, 0x2f, + 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cloudevent_proto_rawDescOnce sync.Once + file_cloudevent_proto_rawDescData = file_cloudevent_proto_rawDesc +) + +func file_cloudevent_proto_rawDescGZIP() []byte { + file_cloudevent_proto_rawDescOnce.Do(func() { + file_cloudevent_proto_rawDescData = protoimpl.X.CompressGZIP(file_cloudevent_proto_rawDescData) + }) + return file_cloudevent_proto_rawDescData +} + +var file_cloudevent_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cloudevent_proto_goTypes = []interface{}{ + (*CloudEvent)(nil), // 0: io.cloudevents.v1.CloudEvent + (*CloudEventAttributeValue)(nil), // 1: io.cloudevents.v1.CloudEventAttributeValue + (*PublishRequest)(nil), // 2: io.cloudevents.v1.PublishRequest + (*SubscriptionRequest)(nil), // 3: io.cloudevents.v1.SubscriptionRequest + nil, // 4: io.cloudevents.v1.CloudEvent.AttributesEntry + (*any1.Any)(nil), // 5: google.protobuf.Any + (*timestamp.Timestamp)(nil), // 6: google.protobuf.Timestamp + (*empty.Empty)(nil), // 7: google.protobuf.Empty +} +var file_cloudevent_proto_depIdxs = []int32{ + 4, // 0: io.cloudevents.v1.CloudEvent.attributes:type_name -> io.cloudevents.v1.CloudEvent.AttributesEntry + 5, // 1: io.cloudevents.v1.CloudEvent.proto_data:type_name -> google.protobuf.Any + 6, // 2: io.cloudevents.v1.CloudEventAttributeValue.ce_timestamp:type_name -> google.protobuf.Timestamp + 0, // 3: io.cloudevents.v1.PublishRequest.event:type_name -> io.cloudevents.v1.CloudEvent + 1, // 4: io.cloudevents.v1.CloudEvent.AttributesEntry.value:type_name -> io.cloudevents.v1.CloudEventAttributeValue + 2, // 5: io.cloudevents.v1.CloudEventService.Publish:input_type -> io.cloudevents.v1.PublishRequest + 3, // 6: io.cloudevents.v1.CloudEventService.Subscribe:input_type -> io.cloudevents.v1.SubscriptionRequest + 7, // 7: io.cloudevents.v1.CloudEventService.Publish:output_type -> google.protobuf.Empty + 0, // 8: io.cloudevents.v1.CloudEventService.Subscribe:output_type -> 
io.cloudevents.v1.CloudEvent + 7, // [7:9] is the sub-list for method output_type + 5, // [5:7] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_cloudevent_proto_init() } +func file_cloudevent_proto_init() { + if File_cloudevent_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cloudevent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventAttributeValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cloudevent_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CloudEvent_BinaryData)(nil), + (*CloudEvent_TextData)(nil), + (*CloudEvent_ProtoData)(nil), + } + file_cloudevent_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*CloudEventAttributeValue_CeBoolean)(nil), + (*CloudEventAttributeValue_CeInteger)(nil), + (*CloudEventAttributeValue_CeString)(nil), + (*CloudEventAttributeValue_CeBytes)(nil), + (*CloudEventAttributeValue_CeUri)(nil), + (*CloudEventAttributeValue_CeUriRef)(nil), + (*CloudEventAttributeValue_CeTimestamp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cloudevent_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cloudevent_proto_goTypes, + DependencyIndexes: file_cloudevent_proto_depIdxs, + MessageInfos: file_cloudevent_proto_msgTypes, + }.Build() + File_cloudevent_proto = out.File + file_cloudevent_proto_rawDesc = nil + file_cloudevent_proto_goTypes = nil + file_cloudevent_proto_depIdxs = nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto new file mode 100644 index 000000000..30cf9c9fd --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto @@ -0,0 +1,81 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +syntax = "proto3"; + +package io.cloudevents.v1; + +option go_package = "open-cluster-management.io/sdk-go/cloudevents/generic/options/grpc/protobuf/v1"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +// CloudEvent is copied from +// 
https://github.com/cloudevents/spec/blob/main/cloudevents/formats/protobuf-format.md. +message CloudEvent { + // Unique event identifier. + string id = 1; + // URI of the event source. + string source = 2; + // Version of the spec in use. + string spec_version = 3; + // Event type identifier. + string type = 4; + + // Optional & Extension Attributes + map attributes = 5; + + // CloudEvent Data (Bytes, Text, or Proto) + oneof data { + // If the event is binary data then the datacontenttype attribute + // should be set to an appropriate media-type. + bytes binary_data = 6; + // If the event is string data then the datacontenttype attribute + // should be set to an appropriate media-type such as application/json. + string text_data = 7; + // If the event is a protobuf then it must be encoded using this Any + // type. The datacontenttype attribute should be set to + // application/protobuf and the dataschema attribute set to the message + // type. + google.protobuf.Any proto_data = 8; + } +} + +// CloudEventAttribute enables extensions to use any of the seven allowed +// data types as the value of an envelope key. +message CloudEventAttributeValue { + // The value can be any one of these types. + oneof attr { + // Boolean value. + bool ce_boolean = 1; + // Integer value. + int32 ce_integer = 2; + // String value. + string ce_string = 3; + // Byte string value. + bytes ce_bytes = 4; + // URI value. + string ce_uri = 5; + // URI reference value. + string ce_uri_ref = 6; + // Timestamp value. + google.protobuf.Timestamp ce_timestamp = 7; + } +} + +message PublishRequest { + // Required. Define the CloudEvent to be published + CloudEvent event = 1; +} + +message SubscriptionRequest { + // Required. The original source of the respond CloudEvent(s). + string source = 1; +} + +service CloudEventService { + rpc Publish(PublishRequest) returns (google.protobuf.Empty) {} + rpc Subscribe(SubscriptionRequest) returns (stream CloudEvent) {} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go new file mode 100644 index 000000000..79b138153 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go @@ -0,0 +1,179 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.12.4 +// source: cloudevent.proto + +package v1 + +import ( + context "context" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + CloudEventService_Publish_FullMethodName = "/io.cloudevents.v1.CloudEventService/Publish" + CloudEventService_Subscribe_FullMethodName = "/io.cloudevents.v1.CloudEventService/Subscribe" +) + +// CloudEventServiceClient is the client API for CloudEventService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CloudEventServiceClient interface { + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Subscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (CloudEventService_SubscribeClient, error) +} + +type cloudEventServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCloudEventServiceClient(cc grpc.ClientConnInterface) CloudEventServiceClient { + return &cloudEventServiceClient{cc} +} + +func (c *cloudEventServiceClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, CloudEventService_Publish_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudEventServiceClient) Subscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (CloudEventService_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &CloudEventService_ServiceDesc.Streams[0], CloudEventService_Subscribe_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &cloudEventServiceSubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CloudEventService_SubscribeClient interface { + Recv() (*CloudEvent, error) + grpc.ClientStream +} + +type cloudEventServiceSubscribeClient struct { + grpc.ClientStream +} + +func (x *cloudEventServiceSubscribeClient) Recv() (*CloudEvent, error) { + m := new(CloudEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloudEventServiceServer is the server API for CloudEventService service. +// All implementations must embed UnimplementedCloudEventServiceServer +// for forward compatibility +type CloudEventServiceServer interface { + Publish(context.Context, *PublishRequest) (*empty.Empty, error) + Subscribe(*SubscriptionRequest, CloudEventService_SubscribeServer) error + mustEmbedUnimplementedCloudEventServiceServer() +} + +// UnimplementedCloudEventServiceServer must be embedded to have forward compatible implementations. +type UnimplementedCloudEventServiceServer struct { +} + +func (UnimplementedCloudEventServiceServer) Publish(context.Context, *PublishRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedCloudEventServiceServer) Subscribe(*SubscriptionRequest, CloudEventService_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedCloudEventServiceServer) mustEmbedUnimplementedCloudEventServiceServer() {} + +// UnsafeCloudEventServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CloudEventServiceServer will +// result in compilation errors. 
+type UnsafeCloudEventServiceServer interface { + mustEmbedUnimplementedCloudEventServiceServer() +} + +func RegisterCloudEventServiceServer(s grpc.ServiceRegistrar, srv CloudEventServiceServer) { + s.RegisterService(&CloudEventService_ServiceDesc, srv) +} + +func _CloudEventService_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudEventServiceServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CloudEventService_Publish_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudEventServiceServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudEventService_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CloudEventServiceServer).Subscribe(m, &cloudEventServiceSubscribeServer{stream}) +} + +type CloudEventService_SubscribeServer interface { + Send(*CloudEvent) error + grpc.ServerStream +} + +type cloudEventServiceSubscribeServer struct { + grpc.ServerStream +} + +func (x *cloudEventServiceSubscribeServer) Send(m *CloudEvent) error { + return x.ServerStream.SendMsg(m) +} + +// CloudEventService_ServiceDesc is the grpc.ServiceDesc for CloudEventService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CloudEventService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "io.cloudevents.v1.CloudEventService", + HandlerType: (*CloudEventServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Publish", + Handler: _CloudEventService_Publish_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _CloudEventService_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "cloudevent.proto", +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go new file mode 100644 index 000000000..f643ef818 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go @@ -0,0 +1,3 @@ +package v1 + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative cloudevent.proto diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go new file mode 100644 index 000000000..b03e77dc9 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go @@ -0,0 +1,205 @@ +package protocol + +import ( + "bytes" + "context" + "fmt" + "net/url" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +const ( + prefix = "ce-" + contenttype = "contenttype" + // dataSchema = "dataschema" + subject = "subject" + time = "time" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a gRPC message. +// This message *can* be read several times safely +type Message struct { + internal *pbv1.CloudEvent + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *pbv1.CloudEvent) *Message { + var f format.Format + var v spec.Version + if msg.Attributes != nil { + if contentType, ok := msg.Attributes[contenttype]; ok && format.IsFormat(contentType.GetCeString()) { + f = format.Lookup(contentType.GetCeString()) + } else if s := msg.SpecVersion; s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } + + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.GetBinaryData())) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error { + if m.version == nil { + return binding.ErrNotBinary + } + + if m.format != nil { + return binding.ErrNotBinary + } + + if m.internal.SpecVersion != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.SpecVersion), m.internal.SpecVersion) + if err != nil { + return err + } + } + if m.internal.Id != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.ID), m.internal.Id) + if err != nil { + return err + } + } + if m.internal.Source != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.Source), m.internal.Source) + if err != nil { + return err + } + } + if m.internal.Type != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.Type), m.internal.Type) + if err != nil { + return err + } + } + + for name, value := range m.internal.Attributes { + attrVal, err := valueFrom(value) + if err != nil { + return fmt.Errorf("failed to convert attribute %s: %s", name, err) + } + + if strings.HasPrefix(name, prefix) { + attr := m.version.Attribute(name) + if attr != nil { + err = encoder.SetAttribute(attr, attrVal) + if err != nil { + return err + } + } else { + err = encoder.SetExtension(strings.TrimPrefix(name, prefix), attrVal) + if err != nil { + return err + } + 
} + } else if name == contenttype { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), attrVal) + if err != nil { + return err + } + } + } + + if m.internal.GetBinaryData() != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.GetBinaryData())) + } + + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + switch attr.Kind() { + case spec.SpecVersion: + return attr, m.internal.SpecVersion + case spec.Type: + return attr, m.internal.Type + case spec.Source: + return attr, m.internal.Source + case spec.ID: + return attr, m.internal.Id + // case spec.DataContentType: + // return attr, m.internal.Attributes[contenttype].GetCeString() + default: + return attr, m.internal.Attributes[prefix+attr.Name()] + } + } + + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Attributes[prefix+name] +} + +func valueFrom(attr *pbv1.CloudEventAttributeValue) (interface{}, error) { + var v interface{} + switch vt := attr.Attr.(type) { + case *pbv1.CloudEventAttributeValue_CeBoolean: + v = vt.CeBoolean + case *pbv1.CloudEventAttributeValue_CeInteger: + v = vt.CeInteger + case *pbv1.CloudEventAttributeValue_CeString: + v = vt.CeString + case *pbv1.CloudEventAttributeValue_CeBytes: + v = vt.CeBytes + case *pbv1.CloudEventAttributeValue_CeUri: + uri, err := url.Parse(vt.CeUri) + if err != nil { + return nil, fmt.Errorf("failed to parse URI value %s: %s", vt.CeUri, err.Error()) + } + v = uri + case *pbv1.CloudEventAttributeValue_CeUriRef: + uri, err := url.Parse(vt.CeUriRef) + if err != nil { + return nil, fmt.Errorf("failed to parse URIRef value %s: %s", vt.CeUriRef, err.Error()) + } + v = types.URIRef{URL: *uri} + case *pbv1.CloudEventAttributeValue_CeTimestamp: + v = vt.CeTimestamp.AsTime() + default: + return nil, fmt.Errorf("unsupported attribute type: %T", vt) + } + return types.Validate(v) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go new file mode 100644 index 000000000..361b1b5de --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go @@ -0,0 +1,24 @@ +package protocol + +import ( + "fmt" +) + +// Option is the function signature +type Option func(*Protocol) error + +// SubscribeOption +type SubscribeOption struct { + Source string +} + +// WithSubscribeOption sets the Subscribe configuration for the client. 
+func WithSubscribeOption(subscribeOpt *SubscribeOption) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go new file mode 100644 index 000000000..467fc2455 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go @@ -0,0 +1,149 @@ +package protocol + +import ( + "context" + "fmt" + "io" + "sync" + + "google.golang.org/grpc" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +// protocol for grpc +// define protocol for grpc + +type Protocol struct { + client pbv1.CloudEventServiceClient + subscribeOption *SubscribeOption + // receiver + incoming chan *pbv1.CloudEvent + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +// new create grpc protocol +func NewProtocol(clientConn grpc.ClientConnInterface, opts ...Option) (*Protocol, error) { + if clientConn == nil { + return nil, fmt.Errorf("the client connection must not be nil") + } + + // TODO: support clientID and error handling in grpc connection + p := &Protocol{ + client: pbv1.NewCloudEventServiceClient(clientConn), + // subClient: + incoming: make(chan *pbv1.CloudEvent), + closeChan: make(chan struct{}), + } + + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + return p, nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + var err error + defer func() { + err = m.Finish(err) + }() + + msg := &pbv1.CloudEvent{} + err = WritePBMessage(ctx, m, msg, transformers...) 
+ if err != nil { + return err + } + + logger := cecontext.LoggerFrom(ctx) + logger.Infof("publishing event with id: %v", msg.Id) + _, err = p.client.Publish(ctx, &pbv1.PublishRequest{ + Event: msg, + }) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the subscribe option must not be nil") + } + + if len(p.subscribeOption.Source) == 0 { + return fmt.Errorf("the subscribe option source must not be empty") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + subClient, err := p.client.Subscribe(ctx, &pbv1.SubscriptionRequest{ + Source: p.subscribeOption.Source, + }) + if err != nil { + return err + } + + logger.Infof("subscribing events for: %v", p.subscribeOption.Source) + go func() { + for { + msg, err := subClient.Recv() + if err != nil { + return + } + p.incoming <- msg + } + }() + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + + return nil +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go new file mode 100644 index 000000000..1c6149344 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go @@ -0,0 +1,215 @@ +package protocol + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "google.golang.org/protobuf/types/known/timestamppb" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +// WritePBMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WritePBMessage(ctx context.Context, m binding.Message, pbEvt *pbv1.CloudEvent, transformers ...binding.Transformer) error { + structuredWriter := (*pbEventWriter)(pbEvt) + binaryWriter := (*pbEventWriter)(pbEvt) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pbEventWriter pbv1.CloudEvent + +var ( + _ binding.StructuredWriter = (*pbEventWriter)(nil) + _ binding.BinaryWriter = (*pbEventWriter)(nil) +) + +func (b *pbEventWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Attributes == nil { + b.Attributes = make(map[string]*pbv1.CloudEventAttributeValue) + } + + b.Attributes[contenttype], _ = attributeFor(f.MediaType()) + + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + + // TODO: check the data content type and set the right data format + b.Data = &pbv1.CloudEvent_BinaryData{ + BinaryData: buf.Bytes(), + } + + return nil +} + +func (b *pbEventWriter) Start(ctx context.Context) error { + if b.Attributes == nil { + b.Attributes = make(map[string]*pbv1.CloudEventAttributeValue) + } + + return nil +} + +func (b *pbEventWriter) End(ctx context.Context) error { + return nil +} + +func (b *pbEventWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + + b.Data = &pbv1.CloudEvent_BinaryData{ + BinaryData: buf.Bytes(), + } + + return nil +} + +func (b *pbEventWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + switch attribute.Kind() { + case spec.SpecVersion: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid SpecVersion type, expected string got %T", value) + } + b.SpecVersion = val + case spec.ID: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid ID type, expected string got %T", value) + } + b.Id = val + case spec.Source: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid Source type, expected string got %T", value) + } + b.Source = val + case spec.Type: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid Type type, expected string got %T", value) + } + b.Type = val + case spec.DataContentType: + if value == nil { + delete(b.Attributes, contenttype) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[contenttype] = attrVal + } + case spec.Subject: + if value == nil { + delete(b.Attributes, prefix+subject) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+subject] = attrVal + } + case spec.Time: + if value == nil { + delete(b.Attributes, prefix+time) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+time] = attrVal + } + default: + if value == nil { + delete(b.Attributes, prefix+attribute.Name()) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+attribute.Name()] = attrVal + } + } + + return nil +} + +func (b *pbEventWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.Attributes, prefix+name) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+name] = attrVal + } + + return nil +} + +func attributeFor(v interface{}) (*pbv1.CloudEventAttributeValue, error) { + vv, err := types.Validate(v) + if err != nil 
{ + return nil, err + } + attr := &pbv1.CloudEventAttributeValue{} + switch vt := vv.(type) { + case bool: + attr.Attr = &pbv1.CloudEventAttributeValue_CeBoolean{ + CeBoolean: vt, + } + case int32: + attr.Attr = &pbv1.CloudEventAttributeValue_CeInteger{ + CeInteger: vt, + } + case string: + attr.Attr = &pbv1.CloudEventAttributeValue_CeString{ + CeString: vt, + } + case []byte: + attr.Attr = &pbv1.CloudEventAttributeValue_CeBytes{ + CeBytes: vt, + } + case types.URI: + attr.Attr = &pbv1.CloudEventAttributeValue_CeUri{ + CeUri: vt.String(), + } + case types.URIRef: + attr.Attr = &pbv1.CloudEventAttributeValue_CeUriRef{ + CeUriRef: vt.String(), + } + case types.Timestamp: + attr.Attr = &pbv1.CloudEventAttributeValue_CeTimestamp{ + CeTimestamp: timestamppb.New(vt.Time), + } + default: + return nil, fmt.Errorf("unsupported attribute type: %T", v) + } + return attr, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go new file mode 100644 index 000000000..e9a65ff02 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go @@ -0,0 +1,52 @@ +package grpc + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" +) + +type gRPCSourceOptions struct { + GRPCOptions + errorChan chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically + sourceID string +} + +func NewSourceOptions(gRPCOptions *GRPCOptions, sourceID string) *options.CloudEventsSourceOptions { + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: &gRPCSourceOptions{ + GRPCOptions: *gRPCOptions, + errorChan: make(chan error), + sourceID: sourceID, + }, + SourceID: sourceID, + } +} + +func (o *gRPCSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + // grpc source client doesn't need to update topic in the context + return ctx, nil +} + +func (o *gRPCSourceOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + func(err error) { + o.errorChan <- err + }, + protocol.WithSubscribeOption(&protocol.SubscribeOption{ + Source: o.sourceID, + }), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *gRPCSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go new file mode 100644 index 000000000..526c7feb5 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go @@ -0,0 +1,117 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + "k8s.io/klog/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type mqttAgentOptions struct { + MQTTOptions + errorChan chan error + 
clusterName string + agentID string +} + +func NewAgentOptions(mqttOptions *MQTTOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + mqttAgentOptions := &mqttAgentOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + clusterName: clusterName, + agentID: agentID, + } + + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: mqttAgentOptions, + AgentID: mqttAgentOptions.agentID, + ClusterName: mqttAgentOptions.clusterName, + } +} + +func (o *mqttAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + topic, err := getAgentPubTopic(ctx) + if err != nil { + return nil, err + } + + if topic != nil { + return cloudeventscontext.WithTopic(ctx, string(*topic)), nil + } + + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource) + if err != nil { + return nil, err + } + + // agent request to sync resource spec from all sources + if eventType.Action == types.ResyncRequestAction && originalSource == types.SourceAll { + if len(o.Topics.AgentBroadcast) == 0 { + klog.Warningf("the agent broadcast topic not set, fall back to the agent events topic") + + // TODO after supporting multiple sources, we should list each source + eventsTopic := replaceLast(o.Topics.AgentEvents, "+", o.clusterName) + return cloudeventscontext.WithTopic(ctx, eventsTopic), nil + } + + resyncTopic := strings.Replace(o.Topics.AgentBroadcast, "+", o.clusterName, 1) + return cloudeventscontext.WithTopic(ctx, resyncTopic), nil + } + + topicSource, err := getSourceFromEventsTopic(o.Topics.AgentEvents) + if err != nil { + return nil, err + } + + // agent publishes status events or spec resync events + eventsTopic := replaceLast(o.Topics.AgentEvents, "+", o.clusterName) + eventsTopic = replaceLast(eventsTopic, "+", topicSource) + return cloudeventscontext.WithTopic(ctx, eventsTopic), nil +} + +func (o *mqttAgentOptions) Client(ctx context.Context) (cloudevents.Client, error) { + subscribe := &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // TODO support multiple sources, currently the client require the source events topic has a sourceID, in + // the future, client may need a source list, it will subscribe to each source + // receiving the sources events + replaceLast(o.Topics.SourceEvents, "+", o.clusterName): {QoS: byte(o.SubQoS)}, + }, + } + + if len(o.Topics.SourceBroadcast) != 0 { + // receiving status resync events from all sources + subscribe.Subscriptions[o.Topics.SourceBroadcast] = paho.SubscribeOptions{QoS: byte(o.SubQoS)} + } + + receiver, err := o.GetCloudEventsClient( + ctx, + fmt.Sprintf("%s-client", o.agentID), + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe(subscribe), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go new file mode 100644 index 000000000..56b13be94 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go @@ -0,0 +1,326 @@ +package mqtt + +import ( + "context" + 
"crypto/tls" + "crypto/x509" + "fmt" + "net" + "os" + "regexp" + "strings" + "time" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/eclipse/paho.golang/packets" + "github.com/eclipse/paho.golang/paho" + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/util/errors" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type TopicKey string +type PubTopic string + +const ( + MQTT_SOURCE_PUB_TOPIC_KEY TopicKey = "mqtt_source_pub_topic" + MQTT_AGENT_PUB_TOPIC_KEY TopicKey = "mqtt_agent_pub_topic" +) + +// MQTTOptions holds the options that are used to build MQTT client. +type MQTTOptions struct { + Topics types.Topics + BrokerHost string + Username string + Password string + CAFile string + ClientCertFile string + ClientKeyFile string + KeepAlive uint16 + DialTimeout time.Duration + PubQoS int + SubQoS int +} + +// MQTTConfig holds the information needed to build connect to MQTT broker as a given user. +type MQTTConfig struct { + // BrokerHost is the host of the MQTT broker (hostname:port). + BrokerHost string `json:"brokerHost" yaml:"brokerHost"` + + // Username is the username for basic authentication to connect the MQTT broker. + Username string `json:"username,omitempty" yaml:"username,omitempty"` + // Password is the password for basic authentication to connect the MQTT broker. + Password string `json:"password,omitempty" yaml:"password,omitempty"` + + // CAFile is the file path to a cert file for the MQTT broker certificate authority. + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + // ClientCertFile is the file path to a client cert file for TLS. + ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` + // ClientKeyFile is the file path to a client key file for TLS. + ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` + + // KeepAlive is the keep alive time in seconds for MQTT clients, by default is 60s + KeepAlive *uint16 `json:"keepAlive,omitempty" yaml:"keepAlive,omitempty"` + + // DialTimeout is the timeout when establishing a MQTT TCP connection, by default is 60s + DialTimeout *time.Duration `json:"dialTimeout,omitempty" yaml:"dialTimeout,omitempty"` + + // PubQoS is the QoS for publish, by default is 1 + PubQoS *int `json:"pubQoS,omitempty" yaml:"pubQoS,omitempty"` + // SubQoS is the Qos for subscribe, by default is 1 + SubQoS *int `json:"subQoS,omitempty" yaml:"subQoS,omitempty"` + + // Topics are MQTT topics for resource spec, status and resync. + Topics *types.Topics `json:"topics,omitempty" yaml:"topics,omitempty"` +} + +// BuildMQTTOptionsFromFlags builds configs from a config filepath. 
+func BuildMQTTOptionsFromFlags(configPath string) (*MQTTOptions, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + config := &MQTTConfig{} + if err := yaml.Unmarshal(configData, config); err != nil { + return nil, err + } + + if config.BrokerHost == "" { + return nil, fmt.Errorf("brokerHost is required") + } + + if (config.ClientCertFile == "" && config.ClientKeyFile != "") || + (config.ClientCertFile != "" && config.ClientKeyFile == "") { + return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") + } + if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") + } + + if err := validateTopics(config.Topics); err != nil { + return nil, err + } + + options := &MQTTOptions{ + BrokerHost: config.BrokerHost, + Username: config.Username, + Password: config.Password, + CAFile: config.CAFile, + ClientCertFile: config.ClientCertFile, + ClientKeyFile: config.ClientKeyFile, + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + DialTimeout: 60 * time.Second, + Topics: *config.Topics, + } + + if config.KeepAlive != nil { + options.KeepAlive = *config.KeepAlive + } + + if config.DialTimeout != nil { + options.DialTimeout = *config.DialTimeout + } + + if config.PubQoS != nil { + options.PubQoS = *config.PubQoS + } + + if config.SubQoS != nil { + options.SubQoS = *config.SubQoS + } + + return options, nil +} + +func (o *MQTTOptions) GetNetConn() (net.Conn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: o.DialTimeout}, "tcp", o.BrokerHost, &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{clientCerts}, + }) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil + } + + conn, err := net.DialTimeout("tcp", o.BrokerHost, o.DialTimeout) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil +} + +func (o *MQTTOptions) GetMQTTConnectOption(clientID string) *paho.Connect { + connect := &paho.Connect{ + ClientID: clientID, + KeepAlive: o.KeepAlive, + CleanStart: true, + } + + if len(o.Username) != 0 { + connect.Username = o.Username + connect.UsernameFlag = true + } + + if len(o.Password) != 0 { + connect.Password = []byte(o.Password) + connect.PasswordFlag = true + } + + return connect +} + +func (o *MQTTOptions) GetCloudEventsClient( + ctx context.Context, + clientID string, + errorHandler func(error), + clientOpts ...cloudeventsmqtt.Option, +) (cloudevents.Client, error) { + netConn, err := o.GetNetConn() + if err != nil { + return nil, err + } + + config := &paho.ClientConfig{ + ClientID: clientID, + Conn: netConn, + OnClientError: errorHandler, + } + + opts := []cloudeventsmqtt.Option{cloudeventsmqtt.WithConnect(o.GetMQTTConnectOption(clientID))} + opts = append(opts, clientOpts...) 
+ protocol, err := cloudeventsmqtt.New(ctx, config, opts...) + if err != nil { + return nil, err + } + + return cloudevents.NewClient(protocol) +} + +func validateTopics(topics *types.Topics) error { + if topics == nil { + return fmt.Errorf("the topics must be set") + } + + var errs []error + if !regexp.MustCompile(types.SourceEventsTopicPattern).MatchString(topics.SourceEvents) { + errs = append(errs, fmt.Errorf("invalid source events topic %q, it should match `%s`", + topics.SourceEvents, types.SourceEventsTopicPattern)) + } + + if !regexp.MustCompile(types.AgentEventsTopicPattern).MatchString(topics.AgentEvents) { + errs = append(errs, fmt.Errorf("invalid agent events topic %q, it should match `%s`", + topics.AgentEvents, types.AgentEventsTopicPattern)) + } + + if len(topics.SourceBroadcast) != 0 { + if !regexp.MustCompile(types.SourceBroadcastTopicPattern).MatchString(topics.SourceBroadcast) { + errs = append(errs, fmt.Errorf("invalid source broadcast topic %q, it should match `%s`", + topics.SourceBroadcast, types.SourceBroadcastTopicPattern)) + } + } + + if len(topics.AgentBroadcast) != 0 { + if !regexp.MustCompile(types.AgentBroadcastTopicPattern).MatchString(topics.AgentBroadcast) { + errs = append(errs, fmt.Errorf("invalid agent broadcast topic %q, it should match `%s`", + topics.AgentBroadcast, types.AgentBroadcastTopicPattern)) + } + } + + return errors.NewAggregate(errs) +} + +func getSourceFromEventsTopic(topic string) (string, error) { + if !regexp.MustCompile(types.EventsTopicPattern).MatchString(topic) { + return "", fmt.Errorf("failed to get source from topic: %q", topic) + } + + subTopics := strings.Split(topic, "/") + // get source form share topic, e.g. $share/group/sources/+/consumers/+/agentevents + if strings.HasPrefix(topic, "$share") { + return subTopics[3], nil + } + + // get source form topic, e.g. 
sources/+/consumers/+/agentevents + return subTopics[1], nil +} + +func replaceLast(str, old, new string) string { + last := strings.LastIndex(str, old) + if last == -1 { + return str + } + return str[:last] + new + str[last+len(old):] +} + +func getSourcePubTopic(ctx context.Context) (*PubTopic, error) { + ctxTopic := ctx.Value(MQTT_SOURCE_PUB_TOPIC_KEY) + if ctxTopic == nil { + return nil, nil + } + + topic, ok := ctxTopic.(PubTopic) + if !ok { + return nil, fmt.Errorf("source pub topic should be a string") + } + + if regexp.MustCompile(types.SourceEventsTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + if regexp.MustCompile(types.SourceBroadcastTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + return nil, fmt.Errorf("invalid source pub topic") +} + +func getAgentPubTopic(ctx context.Context) (*PubTopic, error) { + ctxTopic := ctx.Value(MQTT_AGENT_PUB_TOPIC_KEY) + if ctxTopic == nil { + return nil, nil + } + + topic, ok := ctxTopic.(PubTopic) + if !ok { + return nil, fmt.Errorf("agent pub topic should be a string") + } + + if regexp.MustCompile(types.AgentEventsTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + if regexp.MustCompile(types.AgentBroadcastTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + return nil, fmt.Errorf("invalid agent pub topic") +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go new file mode 100644 index 000000000..1f51e378c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go @@ -0,0 +1,113 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type mqttSourceOptions struct { + MQTTOptions + errorChan chan error + sourceID string + clientID string +} + +func NewSourceOptions(mqttOptions *MQTTOptions, clientID, sourceID string) *options.CloudEventsSourceOptions { + mqttSourceOptions := &mqttSourceOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + sourceID: sourceID, + clientID: clientID, + } + + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: mqttSourceOptions, + SourceID: mqttSourceOptions.sourceID, + } +} + +func (o *mqttSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + topic, err := getSourcePubTopic(ctx) + if err != nil { + return nil, err + } + + if topic != nil { + return cloudeventscontext.WithTopic(ctx, string(*topic)), nil + } + + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName) + if err != nil { + return nil, err + } + + if eventType.Action == types.ResyncRequestAction && clusterName == types.ClusterAll { + // source request to get resources status from all agents + if len(o.Topics.SourceBroadcast) == 0 { + return nil, fmt.Errorf("the source broadcast topic not set") + } + + resyncTopic := 
strings.Replace(o.Topics.SourceBroadcast, "+", o.sourceID, 1) + return cloudeventscontext.WithTopic(ctx, resyncTopic), nil + } + + // source publishes spec events or status resync events + eventsTopic := strings.Replace(o.Topics.SourceEvents, "+", fmt.Sprintf("%s", clusterName), 1) + return cloudeventscontext.WithTopic(ctx, eventsTopic), nil +} + +func (o *mqttSourceOptions) Client(ctx context.Context) (cloudevents.Client, error) { + topicSource, err := getSourceFromEventsTopic(o.Topics.AgentEvents) + if err != nil { + return nil, err + } + + if topicSource != o.sourceID { + return nil, fmt.Errorf("the topic source %q does not match with the client sourceID %q", + o.Topics.AgentEvents, o.sourceID) + } + + subscribe := &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // receiving the agent events + o.Topics.AgentEvents: {QoS: byte(o.SubQoS)}, + }, + } + + if len(o.Topics.AgentBroadcast) != 0 { + // receiving spec resync events from all agents + subscribe.Subscriptions[o.Topics.AgentBroadcast] = paho.SubscribeOptions{QoS: byte(o.SubQoS)} + } + + receiver, err := o.GetCloudEventsClient( + ctx, + o.clientID, + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe(subscribe), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go new file mode 100644 index 000000000..57fbf14e9 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go @@ -0,0 +1,66 @@ +package options + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +// CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. +// +// Available implementations: +// - MQTT +type CloudEventsOptions interface { + // WithContext returns back a new context with the given cloudevent context. The new context will be used when + // sending a cloudevent.The new context is protocol-dependent, for example, for MQTT, the new context should contain + // the MQTT topic, for Kafka, the context should contain the message key, etc. + WithContext(ctx context.Context, evtContext cloudevents.EventContext) (context.Context, error) + + // Client returns a cloudevents client for sending and receiving cloudevents + Client(ctx context.Context) (cloudevents.Client, error) + + // ErrorChan returns a chan which will receive the cloudevents connection error. The source/agent client will try to + // reconnect the when this error occurs. + ErrorChan() <-chan error +} + +// EventRateLimit for limiting the event sending rate. +type EventRateLimit struct { + // QPS indicates the maximum QPS to send the event. + // If it's less than or equal to zero, the DefaultQPS (50) will be used. + QPS float32 + + // Maximum burst for throttle. + // If it's less than or equal to zero, the DefaultBurst (100) will be used. + Burst int +} + +// CloudEventsSourceOptions provides the required options to build a source CloudEventsClient +type CloudEventsSourceOptions struct { + // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. 
+ CloudEventsOptions CloudEventsOptions + + // SourceID is a unique identifier for a source, for example, it can generate a source ID by hashing the hub cluster + // URL and appending the controller name. Similarly, a RESTful service can select a unique name or generate a unique + // ID in the associated database for its source identification. + SourceID string + + // EventRateLimit limits the event sending rate. + EventRateLimit EventRateLimit +} + +// CloudEventsAgentOptions provides the required options to build an agent CloudEventsClient +type CloudEventsAgentOptions struct { + // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. + CloudEventsOptions CloudEventsOptions + + // AgentID is a unique identifier for an agent, for example, it can consist of a managed cluster name and an agent + // name. + AgentID string + + // ClusterName is the name of a managed cluster on which the agent runs. + ClusterName string + + // EventRateLimit limits the event sending rate. + EventRateLimit EventRateLimit +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go new file mode 100644 index 000000000..cacfcd950 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go @@ -0,0 +1,48 @@ +package payload + +import ( + "encoding/json" + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +type ResourceVersion struct { + ResourceID string `json:"resourceID"` + ResourceVersion int64 `json:"resourceVersion"` +} + +type ResourceStatusHash struct { + ResourceID string `json:"resourceID"` + StatusHash string `json:"statusHash"` +} + +// ResourceVersionList represents the resource versions of the resources maintained by the agent. +// The item of this list includes the resource ID and resource version. +type ResourceVersionList struct { + Versions []ResourceVersion `json:"resourceVersions"` +} + +// ResourceStatusHashList represents the status hash of the resources maintained by the source. +// The item of this list includes the resource ID and resource status hash. +type ResourceStatusHashList struct { + Hashes []ResourceStatusHash `json:"statusHashes"` +} + +func DecodeSpecResyncRequest(evt cloudevents.Event) (*ResourceVersionList, error) { + versions := &ResourceVersionList{} + data := evt.Data() + if err := json.Unmarshal(data, versions); err != nil { + return nil, fmt.Errorf("failed to unmarshal spec resync request payload %s, %v", string(data), err) + } + return versions, nil +} + +func DecodeStatusResyncRequest(evt cloudevents.Event) (*ResourceStatusHashList, error) { + hashes := &ResourceStatusHashList{} + data := evt.Data() + if err := json.Unmarshal(data, hashes); err != nil { + return nil, fmt.Errorf("failed to unmarshal status resync request payload %s, %v", string(data), err) + } + return hashes, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go new file mode 100644 index 000000000..ac1e2c188 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go @@ -0,0 +1,34 @@ +package generic + +import ( + "time" + + "k8s.io/client-go/util/flowcontrol" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" +) + +// longThrottleLatency defines threshold for logging requests. 
All requests being +// throttled (via the provided rateLimiter) for more than longThrottleLatency will +// be logged. +const longThrottleLatency = 1 * time.Second + +const ( + // TODO we may adjust these after performance test + DefaultQPS float32 = 50.0 + DefaultBurst int = 100 +) + +func NewRateLimiter(limit options.EventRateLimit) flowcontrol.RateLimiter { + qps := limit.QPS + if qps <= 0.0 { + qps = DefaultQPS + } + + burst := limit.Burst + if burst <= 0 { + burst = DefaultBurst + } + + return flowcontrol.NewTokenBucketRateLimiter(qps, burst) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go new file mode 100644 index 000000000..58da81556 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go @@ -0,0 +1,314 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// CloudEventSourceClient is a client for a source to resync/send/receive its resources with cloud events. +// +// A source is a component that runs on a server, it can be a controller on the hub cluster or a RESTful service +// handling resource requests. +type CloudEventSourceClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + sourceID string +} + +// NewCloudEventSourceClient returns an instance for CloudEventSourceClient. The following arguments are required to +// create a client +// - sourceOptions provides the sourceID and the cloudevents clients that are based on different event protocols for +// sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of a source. +// - statusHashGetter calculates the resource status hash. +// - codecs is list of codecs for encoding/decoding a resource objet/cloudevent to/from a cloudevent/resource objet. +func NewCloudEventSourceClient[T ResourceObject]( + ctx context.Context, + sourceOptions *options.CloudEventsSourceOptions, + lister Lister[T], + statusHashGetter StatusHashGetter[T], + codecs ...Codec[T], +) (*CloudEventSourceClient[T], error) { + baseClient := &baseClient{ + cloudEventsOptions: sourceOptions.CloudEventsOptions, + cloudEventsRateLimiter: NewRateLimiter(sourceOptions.EventRateLimit), + reconnectedChan: make(chan struct{}), + } + + if err := baseClient.connect(ctx); err != nil { + return nil, err + } + + evtCodes := make(map[types.CloudEventsDataType]Codec[T]) + for _, codec := range codecs { + evtCodes[codec.EventDataType()] = codec + } + + return &CloudEventSourceClient[T]{ + baseClient: baseClient, + lister: lister, + codecs: evtCodes, + statusHashGetter: statusHashGetter, + sourceID: sourceOptions.SourceID, + }, nil +} + +func (c *CloudEventSourceClient[T]) ReconnectedChan() <-chan struct{} { + return c.reconnectedChan +} + +// Resync the resources status by sending a status resync request from the current source to a specified cluster. 
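For orientation, a minimal sketch of how the constructor above might be wired up for ManifestWork resources over MQTT, reusing ManifestWorkLister and ManifestWorkStatusHash from the work package added later in this patch. The config path, client ID and source ID are hypothetical, and real Codec implementations (normally registered by the callers of this package) are omitted, so the calls below show only the shape of the API, not a working pipeline:

package main

import (
	"context"

	workv1 "open-cluster-management.io/api/work/v1"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work"
)

// newWorkSourceClient assembles a CloudEventSourceClient for *workv1.ManifestWork.
func newWorkSourceClient(ctx context.Context, mqttOptions *mqtt.MQTTOptions) (*generic.CloudEventSourceClient[*workv1.ManifestWork], error) {
	// The lister's Lister field must be set to an informer lister before the
	// client can answer resync requests (clientbuilder.go below does exactly that).
	workLister := &work.ManifestWorkLister{}

	sourceOptions := mqtt.NewSourceOptions(mqttOptions, "source-client-1", "source1")
	return generic.NewCloudEventSourceClient[*workv1.ManifestWork](
		ctx,
		sourceOptions,
		workLister,                  // Lister[*workv1.ManifestWork]
		work.ManifestWorkStatusHash, // StatusHashGetter[*workv1.ManifestWork]
		// codecs ...Codec[*workv1.ManifestWork] would normally be appended here
	)
}

func main() {
	ctx := context.Background()

	// Hypothetical MQTT config file; BuildMQTTOptionsFromFlags is the same helper
	// the ConfigLoader later in this patch uses for the "mqtt" config type.
	mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags("/etc/mqtt/config.yaml")
	if err != nil {
		panic(err)
	}

	client, err := newWorkSourceClient(ctx, mqttOptions)
	if err != nil {
		panic(err)
	}

	// With codecs registered, this asks every agent to report the status of the
	// resources owned by this source.
	if err := client.Resync(ctx, types.ClusterAll); err != nil {
		panic(err)
	}
}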
+func (c *CloudEventSourceClient[T]) Resync(ctx context.Context, clusterName string) error { + // list the resource objects that are maintained by the current source with a specified cluster + objs, err := c.lister.List(types.ListOptions{Source: c.sourceID, ClusterName: clusterName}) + if err != nil { + return err + } + + hashes := &payload.ResourceStatusHashList{Hashes: make([]payload.ResourceStatusHash, len(objs))} + for i, obj := range objs { + statusHash, err := c.statusHashGetter(obj) + if err != nil { + return err + } + + hashes.Hashes[i] = payload.ResourceStatusHash{ + ResourceID: string(obj.GetUID()), + StatusHash: statusHash, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.sourceID, eventType).WithClusterName(clusterName).NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, hashes); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource spec from a source to an agent. +func (c *CloudEventSourceClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + if eventType.SubResource != types.SubResourceSpec { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find the codec for event %s", eventType.CloudEventsDataType) + } + + evt, err := codec.Encode(c.sourceID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the agent spec resync request or agent resource status request. +// For spec resync request, source publish the current resources spec back as response. +// For resource status request, source receives resource status and handles the status with resource handlers. +func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) 
+ }) +} + +func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type, %v", err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + if err := c.respondResyncSpecRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync resources spec, %v", err) + } + + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + klog.Errorf("failed to find cluster name, %v", err) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode status, %v", err) + return + } + + action, err := c.statusAction(fmt.Sprintf("%s", clusterName), obj) + if err != nil { + klog.Errorf("failed to generate status event %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, obj); err != nil { + klog.Errorf("failed to handle status event %s, %v", evt, err) + } + } +} + +// Upon receiving the spec resync event, the source responds by sending resource status events to the broker as follows: +// - If the request event message is empty, the source returns all resources associated with the work agent. +// - If the request event message contains resource IDs and versions, the source retrieves the resource with the +// specified ID and compares the versions. +// - If the requested resource version matches the source's current maintained resource version, the source does not +// resend the resource. +// - If the requested resource version is older than the source's current maintained resource version, the source +// sends the resource. 
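For concreteness, a sketch of the payload such a spec resync request carries and how it is decoded with the payload helpers added earlier in this patch; the resource ID and version are illustrative:

package main

import (
	"fmt"

	cloudevents "github.com/cloudevents/sdk-go/v2"

	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload"
)

func main() {
	// An agent reports the resource versions it currently holds, e.g.
	// {"resourceVersions":[{"resourceID":"...","resourceVersion":2}]}.
	req := payload.ResourceVersionList{
		Versions: []payload.ResourceVersion{
			{ResourceID: "1f2e3d4c-0000-0000-0000-000000000000", ResourceVersion: 2},
		},
	}

	evt := cloudevents.NewEvent()
	// The real event type and extensions are set by the agent client; only the
	// data matters for this illustration.
	if err := evt.SetData(cloudevents.ApplicationJSON, &req); err != nil {
		panic(err)
	}

	decoded, err := payload.DecodeSpecResyncRequest(evt)
	if err != nil {
		panic(err)
	}

	// The source resends any resource whose current version is newer than the
	// reported one, and sends delete events for IDs it no longer maintains.
	fmt.Println(decoded.Versions[0].ResourceVersion) // 2
}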
+func (c *CloudEventSourceClient[T]) respondResyncSpecRequest( + ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event) error { + resourceVersions, err := payload.DecodeSpecResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: evtDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncResponseAction, + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + return err + } + + objs, err := c.lister.List(types.ListOptions{ClusterName: fmt.Sprintf("%s", clusterName), Source: c.sourceID}) + if err != nil { + return err + } + + for _, obj := range objs { + lastResourceVersion := findResourceVersion(string(obj.GetUID()), resourceVersions.Versions) + currentResourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + continue + } + + if currentResourceVersion > lastResourceVersion { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + } + + // the resources do not exist on the source, but exist on the agent, delete them + for _, rv := range resourceVersions.Versions { + _, exists := getObj(rv.ResourceID, objs) + if exists { + continue + } + + // send a delete event for the current resource + evt := types.NewEventBuilder(c.sourceID, eventType). + WithResourceID(rv.ResourceID). + WithResourceVersion(rv.ResourceVersion). + WithClusterName(fmt.Sprintf("%s", clusterName)). + WithDeletionTimestamp(metav1.Now().Time). + NewEvent() + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventSourceClient[T]) statusAction(clusterName string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: clusterName, Source: c.sourceID}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return evt, nil + } + + lastStatusHash, err := c.statusHashGetter(lastObj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", lastObj.GetUID(), err) + return evt, err + } + + currentStatusHash, err := c.statusHashGetter(obj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", obj.GetUID(), err) + return evt, nil + } + + if lastStatusHash == currentStatusHash { + return evt, nil + } + + return types.StatusModified, nil +} + +func findResourceVersion(id string, versions []payload.ResourceVersion) int64 { + for _, version := range versions { + if id == version.ResourceID { + return version.ResourceVersion + } + } + + return 0 +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go new file mode 100644 index 000000000..b39361162 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go @@ -0,0 +1,283 @@ +package types + +import ( + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/google/uuid" +) + +const ( + // ClusterAll is the default argument to specify on a context when you want to list or filter resources across all + // managed clusters. + ClusterAll = "" + + // SourceAll is the default argument to specify on a context when you want to list or filter resources across all + // sources. + SourceAll = "" +) + +// EventSubResource describes the subresource of a cloud event. Only `spec` and `status` are supported. 
+type EventSubResource string
+
+const (
+	// SubResourceSpec represents the cloud event data is from the resource spec.
+	SubResourceSpec EventSubResource = "spec"
+
+	// SubResourceStatus represents the cloud event data is from the resource status.
+	SubResourceStatus EventSubResource = "status"
+)
+
+// EventAction describes the expected action of a cloud event.
+type EventAction string
+
+const (
+	// ResyncRequestAction represents the cloud event is for the resync request.
+	ResyncRequestAction EventAction = "resync_request"
+
+	// ResyncResponseAction represents the cloud event is for the resync response.
+	ResyncResponseAction EventAction = "resync_response"
+)
+
+const (
+	// ExtensionResourceID is the cloud event extension key of the resource ID.
+	ExtensionResourceID = "resourceid"
+
+	// ExtensionResourceVersion is the cloud event extension key of the resource version.
+	ExtensionResourceVersion = "resourceversion"
+
+	// ExtensionStatusUpdateSequenceID is the cloud event extension key of the status update event sequence ID.
+	// The status update event sequence id represents the order in which status update events occur on a single agent.
+	ExtensionStatusUpdateSequenceID = "sequenceid"
+
+	// ExtensionDeletionTimestamp is the cloud event extension key of the deletion timestamp.
+	ExtensionDeletionTimestamp = "deletiontimestamp"
+
+	// ExtensionClusterName is the cloud event extension key of the cluster name.
+	ExtensionClusterName = "clustername"
+
+	// ExtensionOriginalSource is the cloud event extension key of the original source.
+	ExtensionOriginalSource = "originalsource"
+)
+
+// ResourceAction represents an action on a resource object on the source or agent.
+type ResourceAction string
+
+const (
+	// Added represents a resource is added on the source part.
+	Added ResourceAction = "ADDED"
+
+	// Modified represents a resource is modified on the source part.
+	Modified ResourceAction = "MODIFIED"
+
+	// StatusModified represents the status of a resource is modified on the agent part.
+	StatusModified ResourceAction = "STATUSMODIFIED"
+
+	// Deleted represents a resource is deleted from the source part.
+	Deleted ResourceAction = "DELETED"
+)
+
+const (
+	EventsTopicPattern          = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/(sourceevents|agentevents)$`
+	SourceEventsTopicPattern    = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/sourceevents$`
+	AgentEventsTopicPattern     = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/agentevents$`
+	SourceBroadcastTopicPattern = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/sourcebroadcast$`
+	AgentBroadcastTopicPattern  = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/agentbroadcast$`
+)
+
+// Topics represents required messaging system topics for a source or agent.
+type Topics struct {
+	// SourceEvents topic is a topic for sources to publish their resource create/update/delete events or status resync events
+	// - A source uses this topic to publish its resource create/update/delete request or status resync request with
+	//   its sourceID to a specified agent
+	// - An agent subscribes to this topic with its cluster name to respond to sources' resource create/update/delete
+	//   request or status resync request
+	// The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/sourceevents$`, e.g.
+ // sources/+/clusters/+/sourceevents, sources/source1/clusters/+/sourceevents, sources/source1/clusters/cluster1/sourceevents + // or $share/source-group/sources/+/clusters/+/sourceevents + SourceEvents string `json:"sourceEvents" yaml:"sourceEvents"` + + // AgentEvents topic is a topic for agents to publish their resource status update events or spec resync events + // - An agent using this topic to publish the resource status update request or spec resync request with its + // cluster name to a specified source. + // - A source subscribe to this topic with its sourceID to response agents resource status update request or spec + // resync request + // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/agentevents$`, e.g. + // sources/+/clusters/+/agentevents, sources/source1/clusters/+/agentevents, sources/source1/clusters/cluster1/agentevents + // or $share/agent-group/+/clusters/+/agentevents + AgentEvents string `json:"agentEvents" yaml:"agentEvents"` + + // SourceBroadcast is an optional topic, it is for a source to publish its events to all agents, currently, we use + // this topic to resync resource status from all agents for a source that does not known the exact agents, e.g. + // - A source uses this topic to publish its resource status resync request with its sourceID to all the agents + // - Each agent subscribes to this topic to response sources resource status resync request + // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/sourcebroadcast$`, e.g. + // sources/+/sourcebroadcast, sources/source1/sourcebroadcast or $share/source-group/sources/+/sourcebroadcast + SourceBroadcast string `json:"sourceBroadcast,omitempty" yaml:"sourceBroadcast,omitempty"` + + // AgentBroadcast is an optional topic, it is for a agent to publish its events to all sources, currently, we use + // this topic to resync resources from all sources for an agent that does not known the exact sources, e.g. + // - An agent using this topic to publish the spec resync request with its cluster name to all the sources. + // - Each source subscribe to this topic to response agents spec resync request + // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/agentbroadcast$`, e.g. + // clusters/+/agentbroadcast, clusters/cluster1/agentbroadcast or $share/agent-group/clusters/+/agentbroadcast + AgentBroadcast string `json:"agentBroadcast,omitempty" yaml:"agentBroadcast,omitempty"` +} + +// ListOptions is the query options for listing the resource objects from the source/agent. +type ListOptions struct { + // Source use the cluster name to restrict the list of returned objects by their cluster name. + // Defaults to all clusters. + ClusterName string + + // Agent use the source ID to restrict the list of returned objects by their source ID. + // Defaults to all sources. + Source string +} + +// CloudEventsDataType uniquely identifies the type of cloud event data. +type CloudEventsDataType struct { + Group string + Version string + Resource string +} + +func (t CloudEventsDataType) String() string { + return fmt.Sprintf("%s.%s.%s", t.Group, t.Version, t.Resource) +} + +// CloudEventsType represents the type of cloud events, which describes the type of cloud event data. +type CloudEventsType struct { + // CloudEventsDataType uniquely identifies the type of cloud event data. + CloudEventsDataType + + // SubResource represents the cloud event data is from the resource spec or status. 
+ SubResource EventSubResource + + // Action represents the expected action for this cloud event. + Action EventAction +} + +func (t CloudEventsType) String() string { + return fmt.Sprintf("%s.%s.%s.%s.%s", t.Group, t.Version, t.Resource, t.SubResource, t.Action) +} + +// ParseCloudEventsDataType parse the cloud event data type to a struct object. +// The type format is `..`. +func ParseCloudEventsDataType(cloudEventsDataType string) (*CloudEventsDataType, error) { + types := strings.Split(cloudEventsDataType, ".") + length := len(types) + if length < 3 { + return nil, fmt.Errorf("unsupported cloudevents data type format") + } + return &CloudEventsDataType{ + Group: strings.Join(types[0:length-2], "."), + Version: types[length-2], + Resource: types[length-1], + }, nil +} + +// ParseCloudEventsType parse the cloud event type to a struct object. +// The type format is `....`. +// The `` must be one of "spec" and "status". +func ParseCloudEventsType(cloudEventsType string) (*CloudEventsType, error) { + types := strings.Split(cloudEventsType, ".") + length := len(types) + if length < 5 { + return nil, fmt.Errorf("unsupported cloudevents type format") + } + + subResource := EventSubResource(types[length-2]) + if subResource != SubResourceSpec && subResource != SubResourceStatus { + return nil, fmt.Errorf("unsupported subresource %s", subResource) + } + + return &CloudEventsType{ + CloudEventsDataType: CloudEventsDataType{ + Group: strings.Join(types[0:length-4], "."), + Version: types[length-4], + Resource: types[length-3], + }, + SubResource: subResource, + Action: EventAction(types[length-1]), + }, nil +} + +type EventBuilder struct { + source string + clusterName string + originalSource string + resourceID string + sequenceID string + resourceVersion *int64 + eventType CloudEventsType + deletionTimestamp time.Time +} + +func NewEventBuilder(source string, eventType CloudEventsType) *EventBuilder { + return &EventBuilder{ + source: source, + eventType: eventType, + } +} + +func (b *EventBuilder) WithResourceID(resourceID string) *EventBuilder { + b.resourceID = resourceID + return b +} + +func (b *EventBuilder) WithResourceVersion(resourceVersion int64) *EventBuilder { + b.resourceVersion = &resourceVersion + return b +} + +func (b *EventBuilder) WithStatusUpdateSequenceID(sequenceID string) *EventBuilder { + b.sequenceID = sequenceID + return b +} + +func (b *EventBuilder) WithClusterName(clusterName string) *EventBuilder { + b.clusterName = clusterName + return b +} + +func (b *EventBuilder) WithOriginalSource(originalSource string) *EventBuilder { + b.originalSource = originalSource + return b +} + +func (b *EventBuilder) WithDeletionTimestamp(timestamp time.Time) *EventBuilder { + b.deletionTimestamp = timestamp + return b +} + +func (b *EventBuilder) NewEvent() cloudevents.Event { + evt := cloudevents.NewEvent() + evt.SetID(uuid.New().String()) + evt.SetType(b.eventType.String()) + evt.SetTime(time.Now()) + evt.SetSource(b.source) + + evt.SetExtension(ExtensionClusterName, b.clusterName) + evt.SetExtension(ExtensionOriginalSource, b.originalSource) + + if len(b.resourceID) != 0 { + evt.SetExtension(ExtensionResourceID, b.resourceID) + } + + if b.resourceVersion != nil { + evt.SetExtension(ExtensionResourceVersion, *b.resourceVersion) + } + + if len(b.sequenceID) != 0 { + evt.SetExtension(ExtensionStatusUpdateSequenceID, b.sequenceID) + } + + if !b.deletionTimestamp.IsZero() { + evt.SetExtension(ExtensionDeletionTimestamp, b.deletionTimestamp) + } + + return evt +} diff --git 
a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go new file mode 100644 index 000000000..2b0624241 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go @@ -0,0 +1,160 @@ +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// ManifestWorkAgentClient implements the ManifestWorkInterface. It sends the manifestworks status back to source by +// CloudEventAgentClient. +type ManifestWorkAgentClient struct { + cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkNamespaceLister +} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{} + +func NewManifestWorkAgentClient(cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], watcher *watcher.ManifestWorkWatcher) *ManifestWorkAgentClient { + return &ManifestWorkAgentClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + } +} + +func (c *ManifestWorkAgentClient) SetLister(lister workv1lister.ManifestWorkNamespaceLister) { + c.lister = lister +} + +func (c *ManifestWorkAgentClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "create") +} + +func (c *ManifestWorkAgentClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "update") +} + +func (c *ManifestWorkAgentClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "updatestatus") +} + +func (c *ManifestWorkAgentClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "delete") +} + +func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.Get(name) +} + +func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("sync manifestworks") + // send resync request to fetch 
manifestworks from source when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.watcher, nil +} + +func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + lastWork, err := c.lister.Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceStatus, + } + + newWork := patchedWork.DeepCopy() + + statusUpdated, err := isStatusUpdate(subresources) + if err != nil { + return nil, err + } + + if statusUpdated { + eventType.Action = common.UpdateRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work status in the ManifestWorkInformer local cache with patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil + } + + // the finalizers of a deleting manifestwork are removed, marking the manifestwork status to deleted and sending + // it back to source + if !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0 { + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{ + Type: common.ManifestsDeleted, + Status: metav1.ConditionTrue, + Reason: "ManifestsDeleted", + Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace), + }) + + eventType.Action = common.DeleteRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // delete the manifestwork from the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Deleted, Object: newWork}) + return newWork, nil + } + + // refresh the work in the ManifestWorkInformer local cache with patched work. 
+ c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil +} + +func isStatusUpdate(subresources []string) (bool, error) { + if len(subresources) == 0 { + return false, nil + } + + if len(subresources) == 1 && subresources[0] == "status" { + return true, nil + } + + return false, fmt.Errorf("unsupported subresources %v", subresources) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go new file mode 100644 index 000000000..fa3901430 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go @@ -0,0 +1,76 @@ +package handler + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// NewManifestWorkAgentHandler returns a ResourceHandler for a ManifestWork on managed cluster. It sends the kube events +// with ManifestWorWatcher after CloudEventAgentClient received the ManifestWork specs from source, then the +// ManifestWorkInformer handles the kube events in its local cache. +func NewManifestWorkAgentHandler(lister workv1lister.ManifestWorkNamespaceLister, watcher *watcher.ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.Added: + watcher.Receive(watch.Event{Type: watch.Added, Object: work}) + case types.Modified: + lastWork, err := lister.Get(work.Name) + if err != nil { + return err + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse the resourceVersion of the manifestwork %s, %v", work.Name, err) + } + + lastResourceVersion, err := strconv.ParseInt(lastWork.ResourceVersion, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse the resourceVersion of the manifestwork %s, %v", lastWork.Name, err) + } + + if resourceVersion <= lastResourceVersion { + klog.Infof("The work %s resource version is less than or equal to cached, ignore", work.Name) + return nil + } + + updatedWork := work.DeepCopy() + + // restore the fields that are maintained by local agent + updatedWork.Labels = lastWork.Labels + updatedWork.Annotations = lastWork.Annotations + updatedWork.Finalizers = lastWork.Finalizers + updatedWork.Status = lastWork.Status + + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + case types.Deleted: + // the manifestwork is deleting on the source, we just update its deletion timestamp. 
+ lastWork, err := lister.Get(work.Name) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + updatedWork := lastWork.DeepCopy() + updatedWork.DeletionTimestamp = work.DeletionTimestamp + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + + return nil + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go new file mode 100644 index 000000000..c75dd34c4 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go @@ -0,0 +1,270 @@ +package work + +import ( + "context" + "fmt" + "time" + + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + agentclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client" + agenthandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal" + sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" + sourcehandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +const defaultInformerResyncTime = 10 * time.Minute + +// ClientHolder holds a manifestwork client that implements the ManifestWorkInterface based on different configuration +// and a ManifestWorkInformer that is built with the manifestWork client. +// +// ClientHolder also implements the ManifestWorksGetter interface. +type ClientHolder struct { + workClientSet workclientset.Interface + manifestWorkInformer workv1informers.ManifestWorkInformer +} + +var _ workv1client.ManifestWorksGetter = &ClientHolder{} + +// WorkInterface returns a workclientset Interface +func (h *ClientHolder) WorkInterface() workclientset.Interface { + return h.workClientSet +} + +// ManifestWorks returns a ManifestWorkInterface +func (h *ClientHolder) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + return h.workClientSet.WorkV1().ManifestWorks(namespace) +} + +// ManifestWorkInformer returns a ManifestWorkInformer +func (h *ClientHolder) ManifestWorkInformer() workv1informers.ManifestWorkInformer { + return h.manifestWorkInformer +} + +// ClientHolderBuilder builds the ClientHolder with different configuration. +type ClientHolderBuilder struct { + config any + codecs []generic.Codec[*workv1.ManifestWork] + informerOptions []workinformers.SharedInformerOption + informerResyncTime time.Duration + sourceID string + clusterName string + clientID string +} + +// NewClientHolderBuilder returns a ClientHolderBuilder with a given configuration. 
+// +// Available configurations: +// - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig +// - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT +// - GRPCOptions (*grpc.GRPCOptions): builds a manifestwork client based on cloudevents with GRPC +// +// TODO using a specified config instead of any +func NewClientHolderBuilder(config any) *ClientHolderBuilder { + return &ClientHolderBuilder{ + config: config, + informerResyncTime: defaultInformerResyncTime, + } +} + +// WithClientID set the client ID for source/agent cloudevents client. +func (b *ClientHolderBuilder) WithClientID(clientID string) *ClientHolderBuilder { + b.clientID = clientID + return b +} + +// WithSourceID set the source ID when building a manifestwork client for a source. +func (b *ClientHolderBuilder) WithSourceID(sourceID string) *ClientHolderBuilder { + b.sourceID = sourceID + return b +} + +// WithClusterName set the managed cluster name when building a manifestwork client for an agent. +func (b *ClientHolderBuilder) WithClusterName(clusterName string) *ClientHolderBuilder { + b.clusterName = clusterName + return b +} + +// WithCodecs add codecs when building a manifestwork client based on cloudevents. +func (b *ClientHolderBuilder) WithCodecs(codecs ...generic.Codec[*workv1.ManifestWork]) *ClientHolderBuilder { + b.codecs = codecs + return b +} + +// WithInformerConfig set the ManifestWorkInformer configs. If the resync time is not set, the default time (10 minutes) +// will be used when building the ManifestWorkInformer. +func (b *ClientHolderBuilder) WithInformerConfig( + resyncTime time.Duration, options ...workinformers.SharedInformerOption) *ClientHolderBuilder { + b.informerResyncTime = resyncTime + b.informerOptions = options + return b +} + +// NewSourceClientHolder returns a ClientHolder for source +func (b *ClientHolderBuilder) NewSourceClientHolder(ctx context.Context) (*ClientHolder, error) { + switch config := b.config.(type) { + case *rest.Config: + return b.newKubeClients(config) + case *mqtt.MQTTOptions: + return b.newSourceClients(ctx, mqtt.NewSourceOptions(config, b.clientID, b.sourceID)) + case *grpc.GRPCOptions: + return b.newSourceClients(ctx, grpc.NewSourceOptions(config, b.sourceID)) + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +// NewAgentClientHolder returns a ClientHolder for agent +func (b *ClientHolderBuilder) NewAgentClientHolder(ctx context.Context) (*ClientHolder, error) { + switch config := b.config.(type) { + case *rest.Config: + return b.newKubeClients(config) + case *mqtt.MQTTOptions: + return b.newAgentClients(ctx, mqtt.NewAgentOptions(config, b.clusterName, b.clientID)) + case *grpc.GRPCOptions: + return b.newAgentClients(ctx, grpc.NewAgentOptions(config, b.clusterName, b.clientID)) + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, agentOptions *options.CloudEventsAgentOptions) (*ClientHolder, error) { + if len(b.clientID) == 0 { + return nil, fmt.Errorf("client id is required") + } + + if len(b.clusterName) == 0 { + return nil, fmt.Errorf("cluster name is required") + } + + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( + ctx, + agentOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err 
+ } + + manifestWorkClient := agentclient.NewManifestWorkAgentClient(cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + namespacedLister := manifestWorkLister.ManifestWorks(b.clusterName) + + // Set informer lister back to work lister and client. + workLister.Lister = manifestWorkLister + // TODO the work client and informer share a same store in the current implementation, ideally, the store should be + // only written from the server. we may need to revisit the implementation in the future. + manifestWorkClient.SetLister(namespacedLister) + + cloudEventsClient.Subscribe(ctx, agenthandler.NewManifestWorkAgentHandler(namespacedLister, watcher)) + + go func() { + for { + select { + case <-ctx.Done(): + return + case <-cloudEventsClient.ReconnectedChan(): + // when receiving a client reconnected signal, we resync all sources for this agent + // TODO after supporting multiple sources, we should only resync agent known sources + if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + } + }() + + return &ClientHolder{ + workClientSet: workClientSet, + manifestWorkInformer: informers, + }, nil +} + +func (b *ClientHolderBuilder) newSourceClients(ctx context.Context, sourceOptions *options.CloudEventsSourceOptions) (*ClientHolder, error) { + if len(b.clientID) == 0 { + return nil, fmt.Errorf("client id is required") + } + + if len(b.sourceID) == 0 { + return nil, fmt.Errorf("source id is required") + } + + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( + ctx, + sourceOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err + } + + manifestWorkClient := sourceclient.NewManifestWorkSourceClient(b.sourceID, cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + // Set informer lister back to work lister and client. 
+ workLister.Lister = manifestWorkLister + manifestWorkClient.SetLister(manifestWorkLister) + + sourceHandler := sourcehandler.NewManifestWorkSourceHandler(manifestWorkLister, watcher) + cloudEventsClient.Subscribe(ctx, sourceHandler.HandlerFunc()) + + go sourceHandler.Run(ctx.Done()) + go func() { + for { + select { + case <-ctx.Done(): + return + case <-cloudEventsClient.ReconnectedChan(): + // when receiving a client reconnected signal, we resync all clusters for this source + if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + } + }() + + return &ClientHolder{ + workClientSet: workClientSet, + manifestWorkInformer: informers, + }, nil +} + +func (b *ClientHolderBuilder) newKubeClients(config *rest.Config) (*ClientHolder, error) { + kubeWorkClientSet, err := workclientset.NewForConfig(config) + if err != nil { + return nil, err + } + + factory := workinformers.NewSharedInformerFactoryWithOptions(kubeWorkClientSet, b.informerResyncTime, b.informerOptions...) + return &ClientHolder{ + workClientSet: kubeWorkClientSet, + manifestWorkInformer: factory.Work().V1().ManifestWorks(), + }, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go new file mode 100644 index 000000000..53134eb89 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go @@ -0,0 +1,29 @@ +package common + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. + CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" + + // CloudEventsGenerationAnnotationKey is the key of the manifestwork generation annotation. + CloudEventsGenerationAnnotationKey = "cloudevents.open-cluster-management.io/generation" +) + +// CloudEventsOriginalSourceLabelKey is the key of the cloudevents original source label. +const CloudEventsOriginalSourceLabelKey = "cloudevents.open-cluster-management.io/originalsource" + +// ManifestsDeleted represents the manifests are deleted. +const ManifestsDeleted = "Deleted" + +const ( + CreateRequestAction = "create_request" + UpdateRequestAction = "update_request" + DeleteRequestAction = "delete_request" +) + +var ManifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go new file mode 100644 index 000000000..724143c33 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go @@ -0,0 +1,81 @@ +package work + +import ( + "fmt" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" +) + +const ( + ConfigTypeKube = "kube" + ConfigTypeMQTT = "mqtt" + ConfigTypeGRPC = "grpc" +) + +// ConfigLoader loads a configuration object with a configuration file. +type ConfigLoader struct { + configType string + configPath string + + kubeConfig *rest.Config +} + +// NewConfigLoader returns a ConfigLoader with the given configuration type and configuration file path. 
+// +// Available configuration types: +// - kube +// - mqtt +// - grpc +func NewConfigLoader(configType, configPath string) *ConfigLoader { + return &ConfigLoader{ + configType: configType, + configPath: configPath, + } +} + +// WithKubeConfig sets a kube config, this config will be used when configuration type is kube and the kube +// configuration file path not set. +func (l *ConfigLoader) WithKubeConfig(kubeConfig *rest.Config) *ConfigLoader { + l.kubeConfig = kubeConfig + return l +} + +// TODO using a specified config instead of any +func (l *ConfigLoader) LoadConfig() (string, any, error) { + switch l.configType { + case ConfigTypeKube: + if l.configPath == "" { + if l.kubeConfig == nil { + return "", nil, fmt.Errorf("neither the kube config path nor kube config object was specified") + } + + return l.kubeConfig.Host, l.kubeConfig, nil + } + + kubeConfig, err := clientcmd.BuildConfigFromFlags("", l.configPath) + if err != nil { + return "", nil, err + } + + return kubeConfig.Host, kubeConfig, nil + case ConfigTypeMQTT: + mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + + return mqttOptions.BrokerHost, mqttOptions, nil + case ConfigTypeGRPC: + grpcOptions, err := grpc.BuildGRPCOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + + return grpcOptions.URL, grpcOptions, nil + } + + return "", nil, fmt.Errorf("unsupported config type %s", l.configType) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go new file mode 100644 index 000000000..768a6339b --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go @@ -0,0 +1,54 @@ +package internal + +import ( + discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" + sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" +) + +// WorkClientSetWrapper wraps a work client that has a manifestwork client to a work clientset interface, this wrapper +// will helps us to build manifestwork informer factory easily. 
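Putting the loader and the builder above together, a minimal sketch of how a source process might construct a ManifestWork client holder; the config path, client ID and source ID are hypothetical, and codecs are again omitted:

package main

import (
	"context"

	"k8s.io/klog/v2"

	"open-cluster-management.io/sdk-go/pkg/cloudevents/work"
)

func main() {
	ctx := context.Background()

	// LoadConfig returns the broker host (or kube API host / gRPC URL) plus the
	// protocol-specific options value to hand to the builder.
	host, config, err := work.NewConfigLoader("mqtt", "/etc/mqtt/config.yaml").LoadConfig()
	if err != nil {
		klog.Fatal(err)
	}
	klog.Infof("building work clients against %s", host)

	holder, err := work.NewClientHolderBuilder(config).
		WithClientID("source-client-1").
		WithSourceID("source1").
		// WithCodecs(...) would register the ManifestWork codec(s) in real use.
		NewSourceClientHolder(ctx)
	if err != nil {
		klog.Fatal(err)
	}

	// The holder exposes both the clientset-style interface and the informer.
	works := holder.ManifestWorks("cluster1")
	informer := holder.ManifestWorkInformer()
	_ = works
	_ = informer
}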
+type WorkClientSetWrapper struct { + WorkV1ClientWrapper *WorkV1ClientWrapper +} + +var _ workclientset.Interface = &WorkClientSetWrapper{} + +func (c *WorkClientSetWrapper) WorkV1() workv1client.WorkV1Interface { + return c.WorkV1ClientWrapper +} + +func (c *WorkClientSetWrapper) WorkV1alpha1() workv1alpha1client.WorkV1alpha1Interface { + return nil +} + +func (c *WorkClientSetWrapper) Discovery() discovery.DiscoveryInterface { + return nil +} + +// WorkV1ClientWrapper wraps a manifestwork client to a WorkV1Interface +type WorkV1ClientWrapper struct { + ManifestWorkClient workv1client.ManifestWorkInterface +} + +var _ workv1client.WorkV1Interface = &WorkV1ClientWrapper{} + +func (c *WorkV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + if sourceManifestWorkClient, ok := c.ManifestWorkClient.(*sourceclient.ManifestWorkSourceClient); ok { + sourceManifestWorkClient.SetNamespace(namespace) + return sourceManifestWorkClient + } + return c.ManifestWorkClient +} + +func (c *WorkV1ClientWrapper) AppliedManifestWorks() workv1client.AppliedManifestWorkInterface { + return nil +} + +func (c *WorkV1ClientWrapper) RESTClient() rest.Interface { + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go new file mode 100644 index 000000000..0911ee381 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go @@ -0,0 +1,31 @@ +package work + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" +) + +// ManifestWorkLister list the ManifestWorks from a ManifestWorkInformer's local cache. +type ManifestWorkLister struct { + Lister workv1lister.ManifestWorkLister +} + +// List returns the ManifestWorks from a ManifestWorkInformer's local cache. 
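The lister filters on the originalsource label that the source client stamps onto every work it creates (ensureSourceLabel, later in this patch), so restricting a list to one source is just a label selector in the managed cluster's namespace. A small sketch with hypothetical cluster and source names:

package main

import (
	"fmt"

	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work"
)

// listSourceWorks lists the works owned by source "source1" on cluster "cluster1";
// under the hood this selects cloudevents.open-cluster-management.io/originalsource=source1
// in the namespace "cluster1".
func listSourceWorks(lister *work.ManifestWorkLister) error {
	works, err := lister.List(types.ListOptions{
		ClusterName: "cluster1",
		Source:      "source1",
	})
	if err != nil {
		return err
	}
	fmt.Printf("found %d works\n", len(works))
	return nil
}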
+func (l *ManifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { + selector := labels.Everything() + if options.Source != types.SourceAll { + req, err := labels.NewRequirement(common.CloudEventsOriginalSourceLabelKey, selection.Equals, []string{options.Source}) + if err != nil { + return nil, err + } + + selector = labels.NewSelector().Add(*req) + } + + return l.Lister.ManifestWorks(options.ClusterName).List(selector) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go new file mode 100644 index 000000000..bc0e92eff --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go @@ -0,0 +1,225 @@ +package client + +import ( + "context" + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// ManifestWorkSourceClient implements the ManifestWorkInterface. +type ManifestWorkSourceClient struct { + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkLister + namespace string + sourceID string +} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkSourceClient{} + +func NewManifestWorkSourceClient(sourceID string, + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork], + watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceClient { + return &ManifestWorkSourceClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + sourceID: sourceID, + } +} + +func (c *ManifestWorkSourceClient) SetLister(lister workv1lister.ManifestWorkLister) { + c.lister = lister +} + +func (mw *ManifestWorkSourceClient) SetNamespace(namespace string) { + mw.namespace = namespace +} + +func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + _, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) + if err == nil { + return nil, errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name) + } + + if !errors.IsNotFound(err) { + return nil, err + } + + eventDataType, err := types.ParseCloudEventsDataType(manifestWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceSpec, + Action: common.CreateRequestAction, + } + + generation, err := getWorkGeneration(manifestWork) + if err != nil { + return nil, err + } + + newWork := manifestWork.DeepCopy() + newWork.UID = kubetypes.UID(utils.UID(c.sourceID, c.namespace, newWork.Name)) + newWork.Generation = generation + ensureSourceLabel(c.sourceID, newWork) + if err 
:= c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // add the new work to the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Added, Object: newWork}) + return newWork.DeepCopy(), nil +} + +func (c *ManifestWorkSourceClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "update") +} + +func (c *ManifestWorkSourceClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "updatestatus") +} + +func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + work, err := c.lister.ManifestWorks(c.namespace).Get(name) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + eventDataType, err := types.ParseCloudEventsDataType(work.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceSpec, + Action: common.DeleteRequestAction, + } + + deletingWork := work.DeepCopy() + now := metav1.Now() + deletingWork.DeletionTimestamp = &now + + if err := c.cloudEventsClient.Publish(ctx, eventType, deletingWork); err != nil { + return err + } + + // update the deleting work in the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: deletingWork}) + return nil +} + +func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.ManifestWorks(c.namespace).Get(name) +} + +func (c *ManifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("list manifestworks") + // send resync request to fetch manifestwork status from agents when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.watcher, nil +} + +func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + if len(subresources) != 0 { + return nil, fmt.Errorf("unsupported to update subresources %v", subresources) + } + + lastWork, err := c.lister.ManifestWorks(c.namespace).Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + generation, err := getWorkGeneration(patchedWork) + if err != nil { + return nil, err + } + + if generation <= lastWork.Generation { + return nil, fmt.Errorf("the work %s/%s current generation %d is less than or equal to the last generation %d", + c.namespace, 
name, generation, lastWork.Generation) + } + + eventDataType, err := types.ParseCloudEventsDataType(lastWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceSpec, + Action: common.UpdateRequestAction, + } + + newWork := patchedWork.DeepCopy() + newWork.Generation = generation + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work in the ManifestWorkInformer local cache with the patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork.DeepCopy(), nil +} + +func getWorkGeneration(work *workv1.ManifestWork) (int64, error) { + generation, ok := work.Annotations[common.CloudEventsGenerationAnnotationKey] + if !ok { + return -1, fmt.Errorf("the annotation %s is not found on work %s", common.CloudEventsGenerationAnnotationKey, work.UID) + } + + generationInt, err := strconv.Atoi(generation) + if err != nil { + return -1, err + } + + return int64(generationInt), nil +} + +func ensureSourceLabel(sourceID string, work *workv1.ManifestWork) { + if work.Labels == nil { + work.Labels = map[string]string{} + } + + work.Labels[common.CloudEventsOriginalSourceLabelKey] = sourceID +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go new file mode 100644 index 000000000..0ad31f15d --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go @@ -0,0 +1,167 @@ +package handler + +import ( + "fmt" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +const ManifestWorkFinalizer = "cloudevents.open-cluster-management.io/manifest-work-cleanup" + +type ManifestWorkSourceHandler struct { + works workqueue.RateLimitingInterface + lister workv1lister.ManifestWorkLister + watcher *watcher.ManifestWorkWatcher +} + +// NewManifestWorkSourceHandler returns a ResourceHandler for a ManifestWork source client. It sends kube events +// with the ManifestWorkWatcher after the CloudEventSourceClient receives the ManifestWork status from an agent, then the +// ManifestWorkInformer handles the kube events in its local cache.
+func NewManifestWorkSourceHandler(lister workv1lister.ManifestWorkLister, watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceHandler { + return &ManifestWorkSourceHandler{ + works: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "manifestwork-source-handler"), + lister: lister, + watcher: watcher, + } +} + +func (h *ManifestWorkSourceHandler) Run(stopCh <-chan struct{}) { + defer h.works.ShutDown() + + // start a goroutine to handle the works from the queue + // wait.Until re-kicks runWorker one second after runWorker completes + go wait.Until(h.runWorker, time.Second, stopCh) + + // wait until we're told to stop + <-stopCh +} + +func (h *ManifestWorkSourceHandler) HandlerFunc() generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, obj *workv1.ManifestWork) error { + switch action { + case types.StatusModified: + h.works.Add(obj) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + return nil + } +} + +func (h *ManifestWorkSourceHandler) runWorker() { + // hot loop until we're told to stop. processNextWork will automatically wait until there's work available, so + // we don't worry about secondary waits + for h.processNextWork() { + } +} + +// processNextWork deals with one key off the queue. +func (h *ManifestWorkSourceHandler) processNextWork() bool { + // pull the next event item from the queue. + // the events queue blocks until it can return an item to be processed + key, quit := h.works.Get() + if quit { + // the queue has been shut down and is empty, quit this worker + return false + } + defer h.works.Done(key) + + if err := h.handleWork(key.(*workv1.ManifestWork)); err != nil { + // we failed to handle the work, we should requeue the item to work on later + // this method will add a backoff to avoid hotlooping on particular items + h.works.AddRateLimited(key) + return true + } + + // we handled the event successfully, tell the queue to stop tracking history for this event + h.works.Forget(key) + return true +} + +func (h *ManifestWorkSourceHandler) handleWork(work *workv1.ManifestWork) error { + lastWork := h.getWorkByUID(work.UID) + if lastWork == nil { + // the work is not found; this may happen when the client has restarted and the local cache is not ready yet, requeue this + // work + return errors.NewNotFound(common.ManifestWorkGR, string(work.UID)) + } + + updatedWork := lastWork.DeepCopy() + if meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { + updatedWork.Finalizers = []string{} + h.watcher.Receive(watch.Event{Type: watch.Deleted, Object: updatedWork}) + return nil + } + + resourceVersion, err := strconv.Atoi(work.ResourceVersion) + if err != nil { + klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err) + return nil + } + + if int64(resourceVersion) > lastWork.Generation { + klog.Warningf("the work %s/%s resource version %d is greater than its generation %d, ignore", + lastWork.Namespace, lastWork.Name, resourceVersion, work.Generation) + return nil + } + + // no status change + if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { + return nil + } + + // the work has been handled by the agent, ensure a finalizer on the work + updatedWork.Finalizers = ensureFinalizers(updatedWork.Finalizers) + updatedWork.Status = work.Status + h.watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + return nil +} + +func (h *ManifestWorkSourceHandler) getWorkByUID(uid kubetypes.UID)
*workv1.ManifestWork { + works, err := h.lister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list works, %v", err) + return nil + } + + for _, work := range works { + if work.UID == uid { + return work + } + } + + return nil +} + +func ensureFinalizers(workFinalizers []string) []string { + has := false + for _, f := range workFinalizers { + if f == ManifestWorkFinalizer { + has = true + break + } + } + + if !has { + workFinalizers = append(workFinalizers, ManifestWorkFinalizer) + } + + return workFinalizers +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go new file mode 100644 index 000000000..642f78609 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go @@ -0,0 +1,18 @@ +package work + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestWorkStatusHash returns the SHA256 checksum of a ManifestWork status. +func ManifestWorkStatusHash(work *workv1.ManifestWork) (string, error) { + statusBytes, err := json.Marshal(work.Status) + if err != nil { + return "", fmt.Errorf("failed to marshal work status, %v", err) + } + return fmt.Sprintf("%x", sha256.Sum256(statusBytes)), nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go new file mode 100644 index 000000000..51b0b3354 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/google/uuid" + "k8s.io/apimachinery/pkg/types" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" +) + +// Patch applies the patch to a work with the patch type.
+func Patch(patchType types.PatchType, work *workv1.ManifestWork, patchData []byte) (*workv1.ManifestWork, error) { + workData, err := json.Marshal(work) + if err != nil { + return nil, err + } + + var patchedData []byte + switch patchType { + case types.JSONPatchType: + var patchObj jsonpatch.Patch + patchObj, err = jsonpatch.DecodePatch(patchData) + if err != nil { + return nil, err + } + patchedData, err = patchObj.Apply(workData) + if err != nil { + return nil, err + } + + case types.MergePatchType: + patchedData, err = jsonpatch.MergePatch(workData, patchData) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported patch type: %s", patchType) + } + + patchedWork := &workv1.ManifestWork{} + if err := json.Unmarshal(patchedData, patchedWork); err != nil { + return nil, err + } + + return patchedWork, nil +} + +// UID returns a v5 UUID based on the sourceID, work namespace and name to make sure it is consistent. +func UID(sourceID, namespace, name string) string { + id := fmt.Sprintf("%s-%s-%s-%s", sourceID, common.ManifestWorkGR.String(), namespace, name) + return uuid.NewSHA1(uuid.NameSpaceOID, []byte(id)).String() +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go new file mode 100644 index 000000000..d22a7512a --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go @@ -0,0 +1,64 @@ +package watcher + +import ( + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" +) + +// ManifestWorkWatcher implements the watch.Interface. It returns a channel which will receive all the events. +type ManifestWorkWatcher struct { + sync.Mutex + + result chan watch.Event + done chan struct{} +} + +var _ watch.Interface = &ManifestWorkWatcher{} + +func NewManifestWorkWatcher() *ManifestWorkWatcher { + mw := &ManifestWorkWatcher{ + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. + result: make(chan watch.Event), + // If the watcher is externally stopped there is no receiver anymore + // and the send operations on the result channel, especially the + // error reporting might block forever. + // Therefore a dedicated stop channel is used to resolve this blocking. + done: make(chan struct{}), + } + + return mw +} + +// ResultChan implements Interface. +func (mw *ManifestWorkWatcher) ResultChan() <-chan watch.Event { + return mw.result +} + +// Stop implements Interface. +func (mw *ManifestWorkWatcher) Stop() { + // Make Stop safe to call more than once by locking and checking the done channel. + mw.Lock() + defer mw.Unlock() + // closing a closed channel always panics, therefore check before closing + select { + case <-mw.done: + close(mw.result) + default: + close(mw.done) + } +} + +// Receive receives an event from the work client and sends it down the result channel. +func (mw *ManifestWorkWatcher) Receive(evt watch.Event) { + if klog.V(4).Enabled() { + obj, _ := meta.Accessor(evt.Object) + klog.V(4).Infof("Receive the event %v for %v", evt.Type, obj.GetName()) + } + + mw.result <- evt +}
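
For reference, a minimal sketch (outside the patch) of how the vendored work/utils helpers behave. The source ID, namespace, work name, and merge-patch payload below are made-up values for illustration only; the sketch assumes the vendored open-cluster-management.io/sdk-go packages above are on the module path.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubetypes "k8s.io/apimachinery/pkg/types"
	workv1 "open-cluster-management.io/api/work/v1"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils"
)

func main() {
	// A minimal ManifestWork; name/namespace are illustrative values.
	work := &workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{Name: "example-work", Namespace: "cluster1"},
	}

	// UID is deterministic: the same (sourceID, namespace, name) triple always yields
	// the same v5 UUID, so a restarted source client keeps addressing the same resource.
	fmt.Println(utils.UID("example-source", work.Namespace, work.Name))

	// Patch round-trips the work through JSON and applies a merge patch,
	// returning a new *workv1.ManifestWork with the label added.
	patched, err := utils.Patch(kubetypes.MergePatchType, work, []byte(`{"metadata":{"labels":{"env":"dev"}}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(patched.Labels)
}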
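Likewise, a small sketch of the ManifestWorkWatcher's unbuffered-channel contract: a receiver must be draining ResultChan before Receive is called, otherwise Receive blocks. The work name is again a made-up value, and the goroutine here stands in for the source client that normally calls Receive.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	workv1 "open-cluster-management.io/api/work/v1"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher"
)

func main() {
	w := watcher.NewManifestWorkWatcher()

	// Receive sends on the unbuffered result channel, so it is called from a
	// goroutine while main drains ResultChan below.
	go func() {
		w.Receive(watch.Event{
			Type:   watch.Added,
			Object: &workv1.ManifestWork{ObjectMeta: metav1.ObjectMeta{Name: "example-work"}},
		})
	}()

	evt := <-w.ResultChan()
	fmt.Println(evt.Type, evt.Object.(*workv1.ManifestWork).Name)

	w.Stop()
}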